#!/usr/bin/env python3
"""
添加演示数据脚本
"""

import sqlite3
import json
import uuid
from datetime import datetime

def add_demo_project():
    """Insert the demo research project row and return its id.

    Idempotent: uses the fixed id 'demo_project_001' and skips the
    insert when that row already exists.

    Returns:
        str: the demo project id ('demo_project_001').
    """
    print("📝 添加演示项目...")

    conn = sqlite3.connect('data/research.db')
    try:
        cursor = conn.cursor()

        project_id = 'demo_project_001'

        # Skip creation when the demo row is already present.
        existing = cursor.execute(
            'SELECT id FROM projects WHERE id = ?', (project_id,)
        ).fetchone()
        if existing:
            print("   ⚠️  演示项目已存在，跳过创建")
            return project_id

        cursor.execute('''
            INSERT INTO projects (
                id, title, description, research_area, methodology, timeline, 
                objectives, status, progress
            ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
        ''', (
            project_id,
            '基于深度学习的自然语言处理研究',
            '本研究旨在探索深度学习技术在自然语言处理任务中的应用，重点关注Transformer架构的优化和改进。通过设计新的注意力机制和预训练策略，提升模型在文本理解、生成和翻译等任务上的性能。',
            '人工智能',
            '实验研究',
            '12个月',
            # Objectives are stored as a JSON array string;
            # ensure_ascii=False keeps the Chinese text readable in the DB.
            json.dumps([
                '设计并实现改进的Transformer架构',
                '在多个NLP基准数据集上验证模型性能',
                '分析不同注意力机制的效果',
                '发表高质量学术论文'
            ], ensure_ascii=False),
            'active',
            25.0
        ))

        print(f"   ✅ 创建演示项目: {project_id}")

        conn.commit()
        return project_id
    finally:
        # Always release the connection, even if a DB call raised
        # (the original leaked the connection on error).
        conn.close()

def add_demo_literature(project_id):
    """Insert five well-known NLP papers as demo literature rows.

    Each row gets a fresh random id ('lit_' + 8 hex chars) and is
    linked to *project_id*.

    Args:
        project_id: id of the project the literature belongs to.
    """
    print("📚 添加演示文献...")

    demo_papers = [
        {
            'title': 'Attention Is All You Need',
            'authors': 'Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, Illia Polosukhin',
            'journal': 'Advances in Neural Information Processing Systems',
            'year': 2017,
            'doi': 'arXiv:1706.03762',
            'abstract': 'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks that include an encoder and a decoder. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely.',
            'keywords': 'transformer, attention mechanism, neural networks, sequence modeling',
            'quality_score': 9.5,
            'relevance_score': 9.8,
            'citation_count': 45000
        },
        {
            'title': 'BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding',
            'authors': 'Jacob Devlin, Ming-Wei Chang, Kenton Lee, Kristina Toutanova',
            'journal': 'Proceedings of NAACL-HLT',
            'year': 2019,
            'doi': 'arXiv:1810.04805',
            'abstract': 'We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers.',
            'keywords': 'BERT, bidirectional, pre-training, language model',
            'quality_score': 9.2,
            'relevance_score': 9.5,
            'citation_count': 35000
        },
        {
            'title': 'GPT-3: Language Models are Few-Shot Learners',
            'authors': 'Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, Dario Amodei',
            'journal': 'Advances in Neural Information Processing Systems',
            'year': 2020,
            'doi': 'arXiv:2005.14165',
            'abstract': 'Recent work has demonstrated substantial gains on many NLP tasks and benchmarks by pre-training on a large corpus of text followed by fine-tuning on a specific task. While typically task-agnostic in architecture, this method still requires task-specific fine-tuning datasets of thousands or tens of thousands of examples.',
            'keywords': 'GPT-3, few-shot learning, language model, scaling',
            'quality_score': 9.0,
            'relevance_score': 8.8,
            'citation_count': 25000
        },
        {
            'title': 'RoBERTa: A Robustly Optimized BERT Pretraining Approach',
            'authors': 'Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov',
            'journal': 'arXiv preprint',
            'year': 2019,
            'doi': 'arXiv:1907.11692',
            'abstract': 'Language model pretraining has led to significant performance gains but careful comparison between different approaches is challenging. Training is computationally expensive, often done on private datasets of different sizes, and, as we will show, hyperparameter choices have significant impact on the final results.',
            'keywords': 'RoBERTa, BERT optimization, pretraining, hyperparameters',
            'quality_score': 8.7,
            'relevance_score': 9.0,
            'citation_count': 15000
        },
        {
            'title': 'T5: Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer',
            'authors': 'Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu',
            'journal': 'Journal of Machine Learning Research',
            'year': 2020,
            'doi': 'arXiv:1910.10683',
            'abstract': 'Transfer learning, where a model is first pre-trained on a data-rich task before being fine-tuned on a downstream task, has emerged as a powerful technique in natural language processing (NLP). The effectiveness of transfer learning has given rise to a diversity of approaches, methodology, and practice.',
            'keywords': 'T5, transfer learning, text-to-text, unified framework',
            'quality_score': 8.9,
            'relevance_score': 8.7,
            'citation_count': 12000
        }
    ]

    conn = sqlite3.connect('data/research.db')
    try:
        cursor = conn.cursor()

        for paper in demo_papers:
            literature_id = f'lit_{uuid.uuid4().hex[:8]}'

            cursor.execute('''
                INSERT INTO literature (
                    id, project_id, title, authors, journal, year, doi,
                    abstract, keywords, quality_score, relevance_score, citation_count
                ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            ''', (
                literature_id, project_id, paper['title'], paper['authors'],
                paper['journal'], paper['year'], paper['doi'], paper['abstract'],
                paper['keywords'], paper['quality_score'], paper['relevance_score'],
                paper['citation_count']
            ))

            print(f"   ✅ 添加文献: {paper['title'][:50]}...")

        conn.commit()
    finally:
        # Always release the connection, even if an insert raised
        # (the original leaked the connection on error).
        conn.close()
    print(f"   📊 共添加 {len(demo_papers)} 篇演示文献")

def add_demo_research_plan(project_id):
    """Insert the demo research plan and its four phases.

    Writes one row to `research_plans` (full plan serialized as JSON)
    and one row per phase to `research_phases`.

    Args:
        project_id: id of the project the plan belongs to.
    """
    print("📋 添加演示研究计划...")

    plan_id = f'plan_{uuid.uuid4().hex[:8]}'

    research_plan = {
        'id': plan_id,
        'project_id': project_id,
        'title': '基于深度学习的自然语言处理研究 - 研究计划',
        'generated_at': datetime.now().isoformat(),
        'total_duration': '12个月',
        'phases': [
            {
                'id': f'phase_{uuid.uuid4().hex[:8]}',
                'name': '文献调研阶段',
                'description': '针对深度学习和自然语言处理领域进行系统性文献调研',
                'duration': '2个月',
                'status': 'completed',
                'progress': 100,
                'deliverables': [
                    'NLP领域核心文献清单',
                    'Transformer架构综述报告',
                    '研究空白分析',
                    '技术路线图'
                ],
                'tasks': [
                    '收集Transformer相关的核心论文',
                    '分析现有注意力机制的优缺点',
                    '识别技术改进空间',
                    '制定研究方向'
                ],
                'resources_needed': [
                    '学术数据库访问权限',
                    '文献管理工具',
                    '至少60小时研究时间'
                ]
            },
            {
                'id': f'phase_{uuid.uuid4().hex[:8]}',
                'name': '模型设计阶段',
                'description': '设计改进的Transformer架构和注意力机制',
                'duration': '3个月',
                'status': 'in-progress',
                'progress': 60,
                'deliverables': [
                    '改进的Transformer架构设计',
                    '新注意力机制算法',
                    '理论分析报告',
                    '初步实现代码'
                ],
                'tasks': [
                    '分析现有架构的局限性',
                    '设计新的注意力机制',
                    '进行理论可行性分析',
                    '实现原型系统'
                ],
                'resources_needed': [
                    'GPU计算资源',
                    'PyTorch/TensorFlow框架',
                    '至少80小时开发时间'
                ]
            },
            {
                'id': f'phase_{uuid.uuid4().hex[:8]}',
                'name': '实验验证阶段',
                'description': '在标准数据集上验证模型性能',
                'duration': '4个月',
                'status': 'pending',
                'progress': 0,
                'deliverables': [
                    '实验设计方案',
                    '基准测试结果',
                    '性能对比分析',
                    '消融实验报告'
                ],
                'tasks': [
                    '准备标准评测数据集',
                    '实现基线模型',
                    '进行对比实验',
                    '分析实验结果'
                ],
                'resources_needed': [
                    '大规模计算集群',
                    '标准数据集',
                    '至少100小时实验时间'
                ]
            },
            {
                'id': f'phase_{uuid.uuid4().hex[:8]}',
                'name': '论文撰写阶段',
                'description': '撰写和发表学术论文',
                'duration': '3个月',
                'status': 'pending',
                'progress': 0,
                'deliverables': [
                    '学术论文初稿',
                    '实验数据整理',
                    '论文修改稿',
                    '会议/期刊投稿'
                ],
                'tasks': [
                    '整理实验数据和结果',
                    '撰写论文各个章节',
                    '制作实验图表',
                    '论文投稿和修改'
                ],
                'resources_needed': [
                    'LaTeX写作环境',
                    '数据可视化工具',
                    '至少60小时写作时间'
                ]
            }
        ],
        'timeline': {
            'start_date': '2024-01-01',
            'estimated_end_date': '2024-12-31',
            'milestones': [
                {'month': 2, 'name': '文献调研完成', 'phase': 'literature'},
                {'month': 5, 'name': '模型设计完成', 'phase': 'design'},
                {'month': 9, 'name': '实验验证完成', 'phase': 'experiment'},
                {'month': 12, 'name': '论文发表完成', 'phase': 'publication'}
            ]
        },
        'risk_assessment': [
            {
                'risk': '计算资源不足',
                'probability': '中等',
                'impact': '高',
                'mitigation': '申请云计算资源，优化算法效率'
            },
            {
                'risk': '实验结果不理想',
                'probability': '中等',
                'impact': '中等',
                'mitigation': '准备多个备选方案，及时调整研究方向'
            },
            {
                'risk': '论文被拒',
                'probability': '低',
                'impact': '中等',
                'mitigation': '选择合适期刊，充分准备实验数据'
            }
        ]
    }

    conn = sqlite3.connect('data/research.db')
    try:
        cursor = conn.cursor()

        # The whole plan is persisted as one JSON document.
        cursor.execute('''
            INSERT INTO research_plans (id, project_id, plan_data, status)
            VALUES (?, ?, ?, ?)
        ''', (plan_id, project_id, json.dumps(research_plan, ensure_ascii=False), 'active'))

        # Each phase is additionally flattened into its own row so it can
        # be queried/updated without parsing the full plan JSON.
        for phase in research_plan['phases']:
            cursor.execute('''
                INSERT INTO research_phases (
                    id, project_id, plan_id, phase_name, phase_description, 
                    duration, status, progress, deliverables, tasks, resources_needed
                ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            ''', (
                phase['id'], project_id, plan_id, phase['name'], phase['description'],
                phase['duration'], phase['status'], phase['progress'],
                json.dumps(phase['deliverables'], ensure_ascii=False),
                json.dumps(phase['tasks'], ensure_ascii=False),
                json.dumps(phase['resources_needed'], ensure_ascii=False)
            ))

        conn.commit()
    finally:
        # Always release the connection, even if an insert raised
        # (the original leaked the connection on error).
        conn.close()
    print(f"   ✅ 创建研究计划: {plan_id}")

def add_demo_experiment_design(project_id):
    """Insert the demo experiment design row.

    Stores the research question, hypotheses, experimental groups,
    evaluation metrics and methodology (list/dict fields serialized as
    JSON) into `experiment_designs`.

    Args:
        project_id: id of the project the design belongs to.
    """
    print("🧪 添加演示实验设计...")

    design_id = f'design_{uuid.uuid4().hex[:8]}'

    research_question = "改进的Transformer架构能否在自然语言处理任务上取得更好的性能？"

    hypotheses = [
        "新设计的多头注意力机制能够更好地捕获长距离依赖关系",
        "改进的位置编码方法能够提升模型对序列位置的理解能力",
        "优化的预训练策略能够显著提升下游任务的性能"
    ]

    experimental_groups = [
        {
            'name': '基线Transformer组',
            'description': '使用标准Transformer架构作为对照组',
            'parameters': {
                'architecture': 'standard_transformer',
                'attention_heads': 8,
                'hidden_size': 512,
                'layers': 6
            }
        },
        {
            'name': '改进注意力组',
            'description': '使用改进的多头注意力机制',
            'parameters': {
                'architecture': 'improved_attention',
                'attention_heads': 8,
                'hidden_size': 512,
                'layers': 6,
                'attention_type': 'enhanced'
            }
        },
        {
            'name': '完整改进组',
            'description': '使用所有改进技术的完整模型',
            'parameters': {
                'architecture': 'full_improved',
                'attention_heads': 8,
                'hidden_size': 512,
                'layers': 6,
                'attention_type': 'enhanced',
                'position_encoding': 'improved'
            }
        }
    ]

    evaluation_metrics = [
        {'name': 'BLEU分数', 'description': '机器翻译质量评估指标', 'type': 'primary'},
        {'name': 'ROUGE分数', 'description': '文本摘要质量评估指标', 'type': 'primary'},
        {'name': '困惑度', 'description': '语言模型性能指标', 'type': 'secondary'},
        {'name': 'F1分数', 'description': '分类任务性能指标', 'type': 'primary'},
        {'name': '训练时间', 'description': '模型训练效率指标', 'type': 'efficiency'},
        {'name': '推理速度', 'description': '模型推理效率指标', 'type': 'efficiency'}
    ]

    methodology = """
    实验方法论：
    1. 数据准备：使用WMT翻译数据集、CNN/DailyMail摘要数据集等标准基准
    2. 模型实现：基于PyTorch框架实现各种架构变体
    3. 训练策略：使用相同的超参数和训练策略确保公平比较
    4. 评估方法：在多个下游任务上进行评估，包括机器翻译、文本摘要、情感分析等
    5. 统计分析：使用t检验验证结果的统计显著性
    6. 消融实验：分析各个改进组件的独立贡献
    """

    conn = sqlite3.connect('data/research.db')
    try:
        cursor = conn.cursor()

        cursor.execute('''
            INSERT INTO experiment_designs (
                id, project_id, research_question, hypotheses, experimental_groups,
                evaluation_metrics, methodology, expected_outcomes, status
            ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
        ''', (
            design_id, project_id, research_question,
            json.dumps(hypotheses, ensure_ascii=False),
            json.dumps(experimental_groups, ensure_ascii=False),
            json.dumps(evaluation_metrics, ensure_ascii=False),
            methodology,
            "预期改进的Transformer架构在多个NLP任务上取得2-5%的性能提升",
            'active'
        ))

        conn.commit()
    finally:
        # Always release the connection, even if the insert raised
        # (the original leaked the connection on error).
        conn.close()
    print(f"   ✅ 创建实验设计: {design_id}")

def main():
    """Script entry point: initialise the database, then load all demo data."""
    print("🎭 添加演示数据")
    print("=" * 50)

    # Make sure the data directory exists before the database file is opened.
    from pathlib import Path
    Path('data').mkdir(exist_ok=True)

    # Initialise the schema via the application module; abort on failure.
    try:
        from app import init_database
        init_database()
        print("✅ 数据库初始化完成")
    except Exception as e:
        print(f"❌ 数据库初始化失败: {e}")
        return

    # Seed the demo project first, then attach every dependent data set to it.
    demo_project = add_demo_project()
    for loader in (add_demo_literature, add_demo_research_plan, add_demo_experiment_design):
        loader(demo_project)

    banner = "=" * 50
    print("\n" + banner)
    print("🎉 演示数据添加完成！")
    print(banner)
    for line in (
        "📍 现在可以启动系统并查看演示项目:",
        "   1. 运行: python run_system.py",
        "   2. 访问: http://localhost:8080",
        "   3. 查看演示项目: '基于深度学习的自然语言处理研究'",
    ):
        print(line)

if __name__ == "__main__":
    main()