import time
import json
import requests
from datetime import datetime, timedelta
from bs4 import BeautifulSoup
from .base import BaseCrawler
from urllib.parse import quote, urljoin
from ..models import CrawledData

class WeiboCrawler(BaseCrawler):
    """Weibo crawler implementation.

    Searches the m.weibo.cn container API for posts matching the task's
    keywords, parses each result card into a structured record, and
    persists it as a CrawledData row attached to the task.
    """

    BASE_URL = 'https://m.weibo.cn'
    SEARCH_API = 'https://m.weibo.cn/api/container/getIndex'
    # Rough number of posts per search page; used only to estimate progress.
    ESTIMATED_POSTS_PER_PAGE = 10

    def __init__(self, task):
        """Configure the HTTP session from the task's parameters and credentials.

        Args:
            task: crawl task object providing ``parameters`` (keywords,
                date range, max_pages) and ``data_source.credentials``
                (proxy / timeout / retry settings).
        """
        super().__init__(task)
        self.session = requests.Session()
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1',
            'Accept': 'application/json, text/plain, */*',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'X-Requested-With': 'XMLHttpRequest',
            'MWeibo-Pwa': '1',
            'Referer': 'https://m.weibo.cn/search?containerid=100103type%3D1%26q%3D'
        }
        self.is_running = False

        # Task-level crawl configuration.
        self.keywords = self.task.parameters.get('keywords', [])
        self.start_date = self.task.parameters.get('start_date')
        self.end_date = self.task.parameters.get('end_date')
        self.max_pages = self.task.parameters.get('max_pages', 10)

        # Per-request configuration from the data source credentials.
        self.config = self.task.data_source.credentials
        if self.config.get('use_proxy') and self.config.get('proxy'):
            self.session.proxies = self.config['proxy']
        # BUG FIX: requests.Session has no working `timeout` attribute --
        # assigning `session.timeout` is silently ignored, so requests could
        # hang forever. Keep the value and pass it explicitly on every call.
        self.timeout = self.config.get('timeout', 10)

    def validate_credentials(self):
        """Check that the Weibo search API is reachable and returns JSON data.

        Returns:
            bool: True when a probe search responds 200 with a 'data' key.
        """
        try:
            response = self.session.get(
                f'{self.SEARCH_API}?containerid=100103type%3D1%26q%3Dtest',
                headers=self.headers,
                allow_redirects=False,
                timeout=self.timeout
            )
            return response.status_code == 200 and 'data' in response.json()
        except Exception as e:
            print(f"验证访问失败: {e}")
            return False

    def search_weibo(self, keyword, page):
        """Fetch one page of search results for ``keyword``.

        Retries with exponential backoff on HTTP errors and network
        failures.

        Args:
            keyword: search term (URL-quoted into the container id).
            page: 1-based result page number.

        Returns:
            dict | None: the decoded JSON payload, or None when every
            attempt fails.
        """
        try:
            params = {
                'containerid': f'100103type=1&q={quote(keyword)}',
                'page_type': 'searchall',
                'page': page
            }

            print(f"\nSearching with params: {params}")

            # Hoisted: read the retry budget once instead of per-attempt.
            max_retries = self.config.get('max_retries', 3)
            for attempt in range(max_retries):
                try:
                    print(f"Attempt {attempt + 1}: Sending request...")
                    response = self.session.get(
                        self.SEARCH_API,
                        params=params,
                        headers=self.headers,
                        timeout=self.timeout
                    )
                    print(f"Response status code: {response.status_code}")

                    if response.status_code == 200:
                        print("Response received successfully")
                        json_data = response.json()
                        print(f"Response data: {json.dumps(json_data)[:500]}...")
                        return json_data
                    print(f"Request failed with status code: {response.status_code}")
                    time.sleep(2 ** attempt)  # exponential backoff
                except requests.RequestException as e:
                    print(f"Request failed (attempt {attempt + 1}): {e}")
                    if attempt == max_retries - 1:
                        raise
                    time.sleep(2 ** attempt)
            return None
        except Exception as e:
            print(f"Search error: {e}")
            self.update_progress(
                self.task.progress,
                f"搜索微博时出错: {str(e)}"
            )
            return None

    def parse_weibo_data(self, json_data):
        """Extract post records from a search API response.

        Args:
            json_data: payload returned by ``search_weibo``.

        Returns:
            list[dict]: one structured record per weibo post card.
        """
        if not json_data or 'data' not in json_data:
            return []

        parsed_data = []
        for card in json_data['data'].get('cards', []):
            # card_type 9 marks an actual weibo post card.
            if card.get('card_type') != 9:
                continue

            mblog = card.get('mblog')
            if not mblog:
                continue

            # ROBUSTNESS FIX: the original wrapped the whole loop in one
            # try/except, so a single malformed card discarded every post
            # on the page. Now only the bad card is skipped.
            try:
                parsed_data.append(self._parse_mblog(mblog))
            except Exception as e:
                print(f"Error parsing weibo data: {e}")

        print(f"Parsed {len(parsed_data)} weibo posts")
        return parsed_data

    @staticmethod
    def _parse_mblog(mblog):
        """Build one structured post dict from a raw ``mblog`` payload.

        Raises:
            KeyError/TypeError: when required fields (id, text,
            created_at, user) are missing or malformed; the caller skips
            such cards.
        """
        # Large-size picture URLs, when present.
        pics = [pic['large']['url'] for pic in mblog.get('pics', []) if 'large' in pic]

        # Hashtag topics attached to the post.
        topics = [topic['topic_title'] for topic in mblog.get('topic_struct', [])]

        # Screen names of @-mentioned users.
        mentions = [user['screen_name'] for user in mblog.get('user_mentions', [])]

        user = mblog['user']
        return {
            'id': mblog['id'],
            'text': mblog['text'],
            'created_at': mblog['created_at'],
            'source': mblog.get('source', ''),
            'reposts_count': mblog.get('reposts_count', 0),
            'comments_count': mblog.get('comments_count', 0),
            'attitudes_count': mblog.get('attitudes_count', 0),
            'pictures': pics,
            'topics': topics,
            'mentions': mentions,
            'user': {
                'id': user['id'],
                'screen_name': user['screen_name'],
                'profile_url': f"https://weibo.com/u/{user['id']}",
                'followers_count': user.get('followers_count', 0),
                'verified': user.get('verified', False)
            }
        }

    def start(self):
        """Run the crawl: iterate keywords and pages, saving parsed posts.

        Progress is estimated from ESTIMATED_POSTS_PER_PAGE posts per
        page and capped at 99 until the task completes.
        """
        if not self.validate_credentials():
            # CONSISTENCY FIX: report the failure reason before marking the
            # task complete, matching the error path in the except below.
            self.update_progress(0, "无法访问微博")
            self.complete_task(success=False)
            return

        self.is_running = True
        total_processed = 0
        # Denominator for the progress estimate; only read inside the item
        # loop, so an empty keyword list cannot divide by zero.
        total_expected = len(self.keywords) * self.max_pages * self.ESTIMATED_POSTS_PER_PAGE

        try:
            for keyword in self.keywords:
                if not self.is_running:
                    break

                for page in range(1, self.max_pages + 1):
                    if not self.is_running:
                        break

                    raw_data = self.search_weibo(keyword, page)
                    if raw_data:
                        for item in self.parse_weibo_data(raw_data):
                            if not self.is_running:
                                break

                            self.save_data(
                                data=item,
                                metadata={
                                    'keyword': keyword,
                                    'page': page,
                                    'crawled_at': datetime.now().isoformat()
                                }
                            )

                            total_processed += 1
                            # Cap at 99 so only successful completion shows 100.
                            progress = min(
                                int(total_processed / total_expected * 100),
                                99
                            )
                            self.update_progress(progress)

                    # Throttle between pages to avoid rate limiting.
                    time.sleep(2)

            if self.is_running:
                self.complete_task(success=True)
                self.update_progress(100)
        except Exception as e:
            self.update_progress(
                self.task.progress,
                f"爬取过程中出错: {str(e)}"
            )
            self.complete_task(success=False)
        finally:
            self.is_running = False

    def stop(self):
        """Request a graceful stop; the crawl loop exits at its next check."""
        self.is_running = False

    def save_data(self, data, metadata=None):
        """Persist one parsed post as a CrawledData row.

        Args:
            data: parsed weibo post dict (see ``parse_weibo_data``).
            metadata: optional extra metadata (keyword, page, ...).

        Note:
            BUG FIX: the original called ``metadata.update(...)`` on the
            caller's dict, mutating it as a side effect. The merge now
            happens on a copy.
        """
        try:
            # Structured payload stored in the data column.
            structured_data = {
                'post': {
                    'id': data['id'],
                    'content': data['text'],
                    'created_at': data['created_at'],
                    'source': data.get('source', ''),
                    'engagement': {
                        'reposts': data.get('reposts_count', 0),
                        'comments': data.get('comments_count', 0),
                        'likes': data.get('attitudes_count', 0)
                    },
                    'media': {
                        'pictures': data.get('pictures', []),
                        'topics': data.get('topics', []),
                        'mentions': data.get('mentions', [])
                    }
                },
                'user': {
                    'id': data['user']['id'],
                    'name': data['user']['screen_name'],
                    'profile_url': data['user'].get('profile_url', ''),
                    'followers_count': data['user'].get('followers_count', 0),
                    'verified': data['user'].get('verified', False)
                }
            }

            # Merge platform bookkeeping into a COPY so the caller's dict
            # is left untouched.
            merged_metadata = dict(metadata or {})
            merged_metadata.update({
                'platform': 'weibo',
                'data_type': 'post',
                'crawled_at': datetime.now().isoformat(),
                'keyword': merged_metadata.get('keyword', ''),
                'page': merged_metadata.get('page', 1)
            })

            # Persist to the database, linked to the owning task.
            CrawledData.objects.create(
                task=self.task,
                data=structured_data,
                metadata=merged_metadata
            )

            # Keep the task's item counter in sync.
            self.crawled_items += 1
            self.task.crawled_items = self.crawled_items
            self.task.save()

        except Exception as e:
            print(f"Error saving data: {e}")
            self.update_progress(
                self.task.progress,
                f"保存数据时出错: {str(e)}"
            )