import os
import django
import time
from datetime import datetime, timedelta

# Configure the Django settings module and bootstrap Django.
# NOTE: django.setup() must run BEFORE any model imports below, otherwise
# the app registry is not ready and the imports raise AppRegistryNotReady.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'social_media_analysing.settings')
django.setup()

from django.contrib.auth import get_user_model
from data_analysis.models import DataSource
from data_crawler.models import CrawlerTask, CrawledData
from data_crawler.crawlers.weibo import WeiboCrawler
from data_crawler.tests.config import WEIBO_CONFIG

# Resolve the active user model (honours AUTH_USER_MODEL in settings).
User = get_user_model()

def main():
    """End-to-end smoke test for the Weibo crawler.

    Creates a test user, a DataSource and a CrawlerTask, runs the
    WeiboCrawler against them, polls the task until it completes or fails,
    prints a sample of the collected data, and finally deletes the task
    and data source it created.
    """
    try:
        # Create or fetch the test user.
        user, created = User.objects.get_or_create(
            username='test_user',
            defaults={
                'email': 'test@example.com'
            }
        )
        if created:
            # get_or_create stores `defaults` verbatim, so passing a raw
            # 'password' default would persist it UNHASHED and the user
            # could never authenticate. Hash it properly instead.
            user.set_password('test123')
            user.save(update_fields=['password'])

        # Create the data source the crawler will pull from.
        data_source = DataSource.objects.create(
            name='微博数据源',
            type='weibo',
            credentials=WEIBO_CONFIG,
            is_active=True,
            user=user
        )
        print(f"Created data source with ID: {data_source.id}")
        print(f"Data source credentials: {WEIBO_CONFIG}")

        # Create the crawler task: 3 pages per keyword over the last 7 days.
        task = CrawlerTask.objects.create(
            name='微博数据爬取',
            data_source=data_source,
            parameters={
                'keywords': ['Python编程', '人工智能', '数据分析'],
                'max_pages': 3,  # pages crawled per keyword
                'start_date': (datetime.now() - timedelta(days=7)).strftime('%Y-%m-%d'),
                'end_date': datetime.now().strftime('%Y-%m-%d')
            },
            user=user
        )
        print(f"Created crawler task with ID: {task.id}")
        print(f"Task parameters: {task.parameters}")

        # Initialise the crawler and verify the credentials work at all.
        crawler = WeiboCrawler(task)
        print("\nTesting crawler access...")
        is_valid = crawler.validate_credentials()
        print(f"Crawler access test result: {is_valid}")

        print("\nStarting crawler...")
        # Smoke-test a single keyword search before the full run.
        print("\nTesting search for 'Python编程'...")
        results = crawler.search_weibo("Python编程", 1)
        if results:
            print(f"Search results: {results}")
            parsed_data = crawler.parse_weibo_data(results)
            print(f"Parsed data: {parsed_data}")
        else:
            print("No search results found")

        # Run the full crawl, then poll the task row until it reaches a
        # terminal state ('completed', or 'failed' which breaks out).
        crawler.start()
        while task.status != 'completed':
            task.refresh_from_db()
            print(f"Progress: {task.progress}%, Status: {task.status}")
            if task.status == 'failed':
                print(f"Task failed: {task.error_message}")
                break
            time.sleep(5)

        print("\nCrawler finished!")
        print(f"Final status: {task.status}")
        print(f"Error message: {task.error_message}")

        # Show what was collected.
        crawled_data = CrawledData.objects.filter(task=task)
        print(f"\nTotal items collected: {crawled_data.count()}")

        print("\nSample of crawled data:")
        for data in crawled_data[:5]:  # first 5 entries only
            _display_entry(data)

    except Exception as e:
        print(f"Error occurred: {e}")
        import traceback
        traceback.print_exc()
    finally:
        # Clean up the records this script created. Delete the task first:
        # it holds a foreign key to the data source.
        if 'task' in locals():
            task.delete()
        if 'data_source' in locals():
            data_source.delete()


def _display_entry(data):
    """Print one crawled-data record, tolerating missing or partial fields."""
    try:
        if not data or not data.data:
            print("Skipping invalid data entry")
            return

        print("\n-----------------------------------")

        # Safely extract the post payload.
        post_data = data.data.get('post', {})
        if not post_data:
            print("No post data available")
            return

        # metadata may be None on partially-saved rows; guard before .get()
        # (the original crashed with AttributeError in that case).
        metadata = data.metadata or {}
        keyword = metadata.get('keyword', 'Unknown')
        print(f"Keyword: {keyword}")

        created_at = post_data.get('created_at', 'Unknown')
        print(f"Created at: {created_at}")

        text = post_data.get('content', '')
        if text:
            print(f"Text: {text[:200]}...")  # truncate to 200 chars
        else:
            print("No text content available")

        topics = post_data.get('media', {}).get('topics', [])
        if topics:
            print(f"Topics: {', '.join(topics)}")
        else:
            print("No topics available")

        engagement = post_data.get('engagement', {})
        print(f"Reposts: {engagement.get('reposts', 0)}")
        print(f"Comments: {engagement.get('comments', 0)}")
        print(f"Likes: {engagement.get('likes', 0)}")

    except Exception as e:
        # Best-effort display: report and move on to the next entry.
        print(f"Error processing data entry: {e}")

# Script entry point: run the end-to-end crawler smoke test.
if __name__ == '__main__':
    main() 