from celery import shared_task
from django.utils import timezone
from django.db import transaction
from .models import (
Movie, RatingDistribution, MovieType, 
YearDistribution, CountryStats, CrawlLog
)
from .utils.redis_utils import RedisClient
import requests
from bs4 import BeautifulSoup
import time
import random
import logging
import re
from urllib.parse import unquote
# Logging configuration
logger = logging.getLogger(__name__)  # module-level logger shared by all tasks in this file
redis_client = RedisClient()  # shared Redis helper used for crawl locks, progress and result keys
 # Exception class definitions
class SpiderError(Exception):
    """Base class for all spider-related exceptions."""
    pass
class PageFetchError(SpiderError):
    """Raised when a listing page cannot be fetched after retries."""
    pass
class ParseError(SpiderError):
    """Raised when a movie card cannot be parsed from the page HTML."""
    pass
 # Pool of browser User-Agent strings rotated per request
USER_AGENTS = [
    # Desktop Chrome / Firefox / Safari strings; one is picked at random per request
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15',
 ]
def check_redis_connection():
    """Verify Redis is reachable by writing a short-lived probe key.

    Returns:
        bool: True when the write succeeds, False (with a log entry) otherwise.
    """
    try:
        redis_client.set_key('test_connection', 'test', expire=10)
    except Exception as exc:
        logger.error(f"Redis连接失败: {str(exc)}")
        return False
    return True
class DoubanSpider:
    """Scraper for the Douban Top-250 movie chart.

    Uses randomized User-Agents, Referers, cookies and inter-request
    delays to reduce the chance of being rate-limited, then parses each
    movie card ('.item') from the listing pages into a plain dict.
    """

    def __init__(self):
        self.session = requests.Session()
        self.base_url = 'https://movie.douban.com/top250'
        self.movies = []

    def get_random_ua(self):
        """Return a random User-Agent string from the module-level pool."""
        return random.choice(USER_AGENTS)

    def get_headers(self):
        """Build a freshly randomized header dict for one request."""
        return {
            'User-Agent': self.get_random_ua(),
            # BUGFIX: 'enUS,...' was not a valid Accept-Language tag and
            # could be ignored or rejected by the server; use 'en-US'.
            'Accept-Language': random.choice(['zh-CN,zh;q=0.9', 'en-US,en;q=0.8,zh-CN;q=0.7']),
            'Accept':
    'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Referer': random.choice([
                'https://movie.douban.com/',
                'https://movie.douban.com/top250',
                'https://www.douban.com/',
                'https://www.google.com/'
            ])
        }

    def clean_text(self, text):
        """Normalize scraped text.

        Decodes %-escapes, collapses whitespace runs, canonicalizes '/'
        separators, removes '...' truncation ellipses and strips the ends.
        Returns "" for falsy input.
        """
        if not text:
            return ""
        text = unquote(text)
        text = re.sub(r'\s+', ' ', text)      # collapse whitespace runs
        text = re.sub(r'[/\\]+', '/', text)   # unify slash separators
        text = re.sub(r'\s*/\s*', '/', text)  # trim spaces around '/'
        text = re.sub(r'\.{3,}', '', text)    # drop truncation ellipses
        text = re.sub(r'\s+$', '', text)
        return text.strip()

    def get_page(self, page, retry_count=3):
        """Fetch the HTML of listing page *page* (1-based, 25 movies/page).

        Sleeps a random 8-15s before each request and backs off for
        30-60s on captcha/403 responses.

        Raises:
            PageFetchError: when every attempt fails or is blocked.
        """
        # Hoisted out of the loop so the except-branch log line can
        # never reference an unbound name.
        url = f'{self.base_url}?start={(page-1)*25}'
        for attempt in range(retry_count):
            try:
                # Randomized politeness delay (8-15 seconds)
                delay = random.uniform(8, 15)
                logger.info(f"等待 {delay:.2f} 秒后发起请求...")
                time.sleep(delay)

                # Fresh randomized headers and cookies per attempt
                current_headers = self.get_headers()
                cookies = {
                    'bid': ''.join(random.choice('0123456789abcdef') for _ in range(11)),
                    'll': '"108288"',
                    'ct': str(int(time.time() * 1000))
                }

                logger.info(f"正在请求: {url}, 尝试次数: {attempt+1}/{retry_count}")
                response = self.session.get(
                    url,
                    headers=current_headers,
                    cookies=cookies,
                    timeout=15,
                    verify=True
                )

                # A captcha page or 403 means we are rate-limited: back off
                if "验证码" in response.text or response.status_code == 403:
                    logger.warning("可能被限制访问，等待更长时间...")
                    time.sleep(random.uniform(30, 60))
                    continue

                response.raise_for_status()
                logger.info(f"成功获取页面，大小: {len(response.text)} 字节")

                return response.text

            except Exception as e:
                logger.error(f"获取页面异常: {url}, 错误: {str(e)}")
                if attempt < retry_count - 1:
                    time.sleep(random.uniform(20, 30))  # wait 20-30s before retrying
                    continue
                raise PageFetchError(f"获取页面失败: {str(e)}")
        # BUGFIX: the original implicitly returned None when every attempt
        # hit the captcha/403 `continue` branch, which later crashed the
        # caller (BeautifulSoup(None)); fail loudly instead.
        raise PageFetchError(f"获取页面失败: 已达到最大重试次数 ({retry_count})")

    def parse_movie(self, item):
        """Parse one '.item' card element into a movie dict.

        Raises:
            ParseError: when a required element is missing or malformed.
        """
        try:
            # Rank
            rank_elem = item.select_one('em')
            if not rank_elem:
                raise ParseError("未找到电影排名")
            rank = int(rank_elem.text.strip())

            # Titles: first span is the main title, optional second span
            # is the original title prefixed with '/'
            titles = item.select('span.title')
            if not titles:
                raise ParseError("未找到电影标题")
            title = titles[0].text.strip()
            original_title = titles[1].text.strip().lstrip('/') if len(titles) > 1 else ""

            # Rating
            rating_elem = item.select_one('.rating_num')
            if not rating_elem:
                raise ParseError("未找到电影评分")
            rating = float(rating_elem.text.strip())

            # Info block: line 0 = director/actors, line 1 = year/country/genre
            info_elem = item.select_one('.bd p')
            if not info_elem:
                raise ParseError("未找到电影详细信息")
            info = info_elem.text.strip()

            # Director and actors
            director = ""
            actors = ""
            director_actor_text = info.split('\n')[0].strip()
            director_match = re.search(r'导演:\s*(.*?)(?:\s*主演:|$)', director_actor_text)
            if director_match:
                director = director_match.group(1).strip()
            actors_match = re.search(r'主演:\s*(.*?)$', director_actor_text)
            if actors_match:
                actors = actors_match.group(1).strip()

            # Year, country and genre
            year_country_genre = info.split('\n')[1].strip()
            year_match = re.search(r'\b(\d{4})\b', year_country_genre)
            year = int(year_match.group(1)) if year_match else None

            country_match = re.search(r'\d{4}\s*/\s*([^/]+)', year_country_genre)
            country = country_match.group(1).strip() if country_match else ""

            genre_match = re.search(r'\d{4}\s*/\s*[^/]+\s*/\s*(.*)', year_country_genre)
            genre = genre_match.group(1).strip() if genre_match else ""

            # Poster image
            image_elem = item.select_one('img')
            if not image_elem or 'src' not in image_elem.attrs:
                raise ParseError("未找到电影图片")
            image_url = image_elem['src']

            # One-line quote used as the intro, with a placeholder default
            intro = "暂无简介"
            quote_p = item.find('p', class_='quote')
            if quote_p:
                span = quote_p.find('span')
                if span:
                    intro = span.get_text(strip=True)

            # NOTE: the original cleaned `genre` twice (once here, once in
            # the dict below); the redundant first pass was removed since
            # clean_text is applied on return anyway.
            return {
                'rank': rank,
                'title': self.clean_text(title),
                'original_title': self.clean_text(original_title),
                'rating': rating,
                'director': self.clean_text(director),
                'actors': self.clean_text(actors),
                'year': year,
                'country': self.clean_text(country),
                'genre': self.clean_text(genre),
                'intro': self.clean_text(intro),
                'image_url': image_url
            }
        except ParseError:
            # BUGFIX: ParseError was previously re-caught by the generic
            # handler and double-wrapped/double-logged; propagate as-is.
            raise
        except Exception as e:
            logger.error(f"解析电影信息失败: {str(e)}")
            raise ParseError(f"解析电影信息失败: {str(e)}")

    def clean_movie_data(self, movie_data):
        """Return a normalized copy of *movie_data* (rating coerced to float)."""
        return {
            'rank': movie_data['rank'],
            'title': movie_data['title'],
            'original_title': movie_data['original_title'],
            'rating': float(movie_data['rating']),
            'director': movie_data['director'],
            'actors': movie_data['actors'],
            'year': movie_data['year'],
            'country': movie_data['country'],
            'genre': movie_data['genre'],
            'intro': movie_data['intro'],
            'image_url': movie_data['image_url']
        }

    def crawl_page(self, page):
        """Crawl one listing page and return a list of cleaned movie dicts.

        Retries up to 3 times on page-level errors; individual movie
        parse failures are logged and skipped.
        """
        retry_count = 0
        max_retries = 3
        page_movies = []

        while retry_count < max_retries:
            try:
                logger.info(f"正在爬取第 {page} 页")
                html = self.get_page(page)

                soup = BeautifulSoup(html, 'lxml')
                items = soup.select('.item')

                if not items:
                    logger.warning(f"第 {page} 页未找到电影数据")
                    break
                for item in items:
                    try:
                        movie = self.parse_movie(item)
                        if movie:
                            cleaned_movie = self.clean_movie_data(movie)
                            page_movies.append(cleaned_movie)
                    except ParseError as e:
                        logger.error(f"解析电影数据失败: {str(e)}")
                        continue
                # Small extra random delay between pages
                delay = random.uniform(1, 3)
                logger.debug(f"等待 {delay:.2f} 秒后继续")
                time.sleep(delay)

                break  # success: leave the retry loop

            except Exception as e:
                retry_count += 1
                logger.error(f"爬取第 {page} 页时发生错误: {str(e)}")
                if retry_count < max_retries:
                    logger.info(f"第 {retry_count} 次重试...")
                    time.sleep(2)  # brief pause before retrying
                else:
                    logger.error(f"爬取第 {page} 页失败，已达到最大重试次数")

        return page_movies
def save_movies_batch(movies, batch_size=10):
    """Persist movie dicts to the Movie table in batches.

    Each batch is written inside one transaction for speed. BUGFIX: the
    original comment promised to "continue with other movies" on failure,
    but the batch-level ``transaction.atomic()`` rolled back and skipped
    the entire batch when any single row failed; we now fall back to
    saving that batch's rows one by one so only the offending row is lost.

    Args:
        movies: list of cleaned movie dicts (keys match Movie fields).
        batch_size: number of rows per transaction (default 10).
    """
    for start in range(0, len(movies), batch_size):
        batch = movies[start:start + batch_size]
        try:
            with transaction.atomic():
                for movie_data in batch:
                    Movie.objects.create(**movie_data)
        except Exception as e:
            logger.error(f"保存电影批次 {start//batch_size + 1} 失败: {str(e)}")
            # Retry rows individually so one bad record doesn't discard
            # its whole batch.
            for movie_data in batch:
                try:
                    with transaction.atomic():
                        Movie.objects.create(**movie_data)
                except Exception as row_error:
                    logger.error(f"保存单部电影失败: {str(row_error)}")
# def save_statistics(movies):
#     """保存统计数据"""
#     try:
#         with transaction.atomic():
#             # 清空现有统计数据
#             RatingDistribution.objects.all().delete()
#             MovieType.objects.all().delete()
#             YearDistribution.objects.all().delete()
#             CountryStats.objects.all().delete()
            
#             # 更新评分分布
#             rating_dist = {}
#             for movie in movies:
#                 rating = round(movie['rating'], 1)
#                 rating_dist[rating] = rating_dist.get(rating, 0) + 1
            
#             for rating, count in rating_dist.items():
#                 RatingDistribution.objects.create(rating=rating, count=count)
            
#             # 更新电影类型分布
#             type_dist = {}
#             for movie in movies:
#                 types = movie['genre'].split('/')
#                 for t in types:
#                     t = t.strip()
#                     type_dist[t] = type_dist.get(t, 0) + 1
            
#             for type_name, count in type_dist.items():
#                 MovieType.objects.create(type_name=type_name, count=count)
            
#             # 更新年份分布
#             year_dist = {}
#             for movie in movies:
#                 year = movie['year']
#                 if year:
#                     year_dist[year] = year_dist.get(year, 0) + 1
            
#             for year, count in year_dist.items():
#                 YearDistribution.objects.create(year=year, count=count)
            
#             # 更新国家分布
#             country_dist = {}
#             for movie in movies:
#                 countries = movie['country'].split('/')
#                 for country in countries:
#                     country = country.strip()
#                     country_dist[country] = country_dist.get(country, 0) + 1
            
#             for country, count in country_dist.items():
#                 CountryStats.objects.create(country=country, count=count)
#     except Exception as e:
#         logger.error(f"保存统计数据失败: {str(e)}")
#         raise

# def save_statistics(movies):
#     """保存统计数据"""
#     try:
#         with transaction.atomic():
#             # 清空现有统计数据
#             RatingDistribution.objects.all().delete()
#             MovieType.objects.all().delete()
#             YearDistribution.objects.all().delete()
#             CountryStats.objects.all().delete()
#             # 更新评分分布
#             rating_dist = {}
#             for movie in movies:
#                 rating = round(movie['rating'], 1)
#                 rating_dist[rating] = rating_dist.get(rating, 0) + 1
#             for rating, count in rating_dist.items():
#                 RatingDistribution.objects.create(rating=rating, count=count)
#             # 更新电影类型分布
#             type_dist = {}
#             for movie in movies:
#                 types = movie['genre'].split('/')
#                 for t in types:
#                     t = t.strip()
#                     type_dist[t] = type_dist.get(t, 0) + 1
#             for type_name, count in type_dist.items():
#                 MovieType.objects.create(type_name=type_name, count=count)
#             # 更新年份分布
#             year_dist = {}
#             for movie in movies:
#                 year = movie['year']
#                 if year:
#                     year_dist[year] = year_dist.get(year, 0) + 1
#             for year, count in year_dist.items():
#                 YearDistribution.objects.create(year=year, count=count)
#             # 更新国家分布
#             country_dist = {}
#             for movie in movies:
#                 countries = movie['country'].split('/')
#                 for country in countries:
#                     country = country.strip()
#                     country_dist[country] = country_dist.get(country, 0) + 1
#             for country, count in country_dist.items():
#                 CountryStats.objects.create(country=country, count=count)
#     except Exception as e:
#         logger.error(f"保存统计数据失败: {str(e)}")
#         raise

def save_statistics(movies):
    """Rebuild all statistics tables from the crawled movie dicts.

    Runs inside a single transaction: wipes the four statistics tables,
    then recomputes the rating/type/year distributions and per-country
    stats (count, average rating, top-rated movie).

    Args:
        movies: list of cleaned movie dicts as produced by
            DoubanSpider.clean_movie_data().

    Raises:
        Exception: re-raised after logging so the caller can mark the
            crawl as failed.
    """
    try:
        with transaction.atomic():
            # Wipe existing statistics before recomputing
            RatingDistribution.objects.all().delete()
            MovieType.objects.all().delete()
            YearDistribution.objects.all().delete()
            CountryStats.objects.all().delete()

            # Rating distribution (ratings bucketed to one decimal)
            rating_dist = {}
            for movie in movies:
                rating = round(movie['rating'], 1)
                rating_dist[rating] = rating_dist.get(rating, 0) + 1
            for rating, count in rating_dist.items():
                RatingDistribution.objects.create(rating=rating, count=count)

            # Genre distribution ('genre' is a '/'-separated string)
            type_dist = {}
            for movie in movies:
                for t in movie['genre'].split('/'):
                    t = t.strip()
                    type_dist[t] = type_dist.get(t, 0) + 1
            for type_name, count in type_dist.items():
                MovieType.objects.create(type_name=type_name, count=count)

            # Year distribution (movies with no parsed year are skipped)
            year_dist = {}
            for movie in movies:
                year = movie['year']
                if year:
                    year_dist[year] = year_dist.get(year, 0) + 1
            for year, count in year_dist.items():
                YearDistribution.objects.create(year=year, count=count)

            # Country statistics: group movies per country ('country' is
            # also a '/'-separated string, so one movie may count for
            # several countries)
            country_movies = {}
            for movie in movies:
                for country in movie['country'].split('/'):
                    country = country.strip()
                    country_movies.setdefault(country, []).append(movie)

            # BUGFIX: the original inner loop was
            # `for country, movies in country_movies.items():`, which
            # shadowed and clobbered the `movies` parameter; renamed.
            country_stats_list = []
            for country, grouped in country_movies.items():
                # `grouped` is never empty by construction, so the
                # original `count > 0` / `if movies` guards were dead code.
                count = len(grouped)
                avg_rating = round(sum(m['rating'] for m in grouped) / count, 1)
                # Highest-rated movie for this country (first on ties)
                top_movie = max(grouped, key=lambda m: m['rating'])
                country_stats_list.append({
                    'country': country,
                    'count': count,
                    'avg_rating': avg_rating,
                    'top_movie_title': top_movie['title'],
                    'top_movie_rating': top_movie['rating'],
                    'top_movie_year': top_movie['year'] if top_movie['year'] else None,
                })

            # Single INSERT for all country rows
            CountryStats.objects.bulk_create([
                CountryStats(**stats) for stats in country_stats_list
            ])

    except Exception as e:
        logger.error(f"保存统计数据失败: {str(e)}")
        raise


@shared_task(bind=True)
def crawl_douban_movies(self, sync_mode=False):
    """Celery task: crawl the Douban Top-250 chart and persist movies + stats.

    Args:
        self: the bound Celery task instance (``bind=True``).
        sync_mode: when True the task runs synchronously — the Redis
            "already running" lock and Celery ``update_state`` calls are
            skipped.

    Returns:
        bool: True on success; False when Redis is down, another crawl is
        already running, or an unhandled error occurs.
    """
    task_id = self.request.id if not sync_mode else 'sync_task'
    logger.info(f"开始执行爬虫任务，任务ID: {task_id}")
    # Abort early if Redis is unreachable — locking and progress depend on it
    if not check_redis_connection():
        logger.error("Redis连接失败，无法执行爬虫任务")
        return False
    # DB row recording this crawl's lifecycle (running / success / failed)
    crawl_log = CrawlLog.objects.create(status='running')
    
    try:
        # Refuse to start if another crawl holds the lock (async mode only)
        if not sync_mode and redis_client.exists_key('crawling_status'):
            logger.warning("已有爬虫任务正在运行")
            crawl_log.status = 'failed'
            crawl_log.error_message = "已有爬虫任务正在运行"
            crawl_log.end_time = timezone.now()
            crawl_log.save()
            return False
        
        # Take the crawl lock (async mode only); auto-expires after 1 hour
        if not sync_mode:
            redis_client.set_key('crawling_status', 'running', expire=3600)
        
        # Record the start timestamp
        start_time = time.time()
        redis_client.set_key('last_crawl_start', start_time)
        
        # Progress bookkeeping: Top-250 spans 10 pages of 25 movies
        total_pages = 10
        current_page = 0
        total_movies = 0
        
        # Publish the initial Celery task state (async mode only)
        if not sync_mode:
            self.update_state(
                state='PROGRESS',
                meta={
                    'current': current_page,
                    'total': total_pages,
                    'status': '开始爬取',
                    'movies_count': 0
                }
            )
        
        # Run the spider page by page
        spider = DoubanSpider()
        movies = []
        
        for page in range(1, total_pages + 1):
            current_page = page
            try:
                logger.info(f"正在爬取第 {page}/{total_pages} 页")
                
                # Build the progress payload for this page
                progress = {
                    'current_page': current_page,
                    'total_pages': total_pages,
                    'status': f'正在爬取第 {current_page} 页',
                    'movies_count': len(movies),
                    'percentage': int((current_page / total_pages) * 100)
                }
                
                # Persist progress to Redis so the UI can poll it
                redis_client.set_key('crawl_progress', progress)
                
                # Mirror progress into the Celery task state (async mode only)
                if not sync_mode:
                    self.update_state(
                        state='PROGRESS',
                        meta=progress
                    )
                
                # Fetch and parse this page
                page_movies = spider.crawl_page(page)
                movies.extend(page_movies)
                
                # Track the running movie total
                total_movies = len(movies)
                logger.info(f"当前已爬取 {total_movies} 部电影")
                
            except Exception as e:
                logger.error(f"爬取第 {page} 页时发生错误: {str(e)}")
                # Record the per-page failure but keep crawling other pages
                progress['status'] = f'第 {page} 页爬取失败: {str(e)}'
                redis_client.set_key('crawl_progress', progress)
                continue
        
        # Publish the final (100%) progress before saving
        final_progress = {
            'current_page': total_pages,
            'total_pages': total_pages,
            'status': '爬取完成，正在保存数据',
            'movies_count': total_movies,
            'percentage': 100
        }
        redis_client.set_key('crawl_progress', final_progress)
        
        try:
            # Replace the previous dataset wholesale
            Movie.objects.all().delete()
            
            # Persist the crawled movies in batches
            save_movies_batch(movies)
            
            # Rebuild the statistics tables
            save_statistics(movies)
            
        except Exception as e:
            logger.error(f"保存数据时发生错误: {str(e)}")
            raise
 
        # Log the final count
        logger.info(f"成功爬取 {total_movies} 部电影")

        
        # Store the crawl outcome in Redis for quick status queries
        redis_client.set_key('last_crawl_result', {
            'count': total_movies,
            'timestamp': time.time(),
            'status': 'success'
        })
        
        # Mark the crawl-log row as successful
        crawl_log.status = 'success'
        crawl_log.movie_count = total_movies
        crawl_log.end_time = timezone.now()
        crawl_log.save()
        
        # Drop the progress key now that the crawl is finished
        redis_client.delete_key('crawl_progress')
        
        return True
        
    except Exception as e:
        # Record the failure in Redis for status queries
        error_message = str(e)
        logger.error(f"爬虫任务失败: {error_message}")
        redis_client.set_key('last_crawl_error', error_message)
        
        # Mark the crawl-log row as failed
        crawl_log.status = 'failed'
        crawl_log.error_message = error_message
        crawl_log.end_time = timezone.now()
        crawl_log.save()
        
        # Propagate the failure into the Celery task state (async mode only)
        if not sync_mode:
            self.update_state(state='FAILURE', meta={'error': error_message})
        
        return False
        
    finally:
        # Always release the crawl lock (async mode only)
        if not sync_mode:
            redis_client.delete_key('crawling_status')

from celery import shared_task
from django.core.cache import cache
@shared_task
def update_statistics():
    """Asynchronously refresh the year-distribution table and drop its cache.

    Returns:
        bool: True on success, False (with a log entry) on any error.
    """
    try:
        # Recompute the year distribution from current data
        YearDistribution.update_year_distribution()
        # Invalidate the cached chart payload so it gets rebuilt
        cache.delete('year_line_data')
    except Exception as exc:
        logger.error(f"更新统计数据失败: {str(exc)}")
        return False
    return True
    
from .services import WorldMapDataService

