"use client";

import React from "react";
import { useLanguage } from "~/lib/LanguageContext";
import Link from "next/link";
import { Prism as SyntaxHighlighter } from 'react-syntax-highlighter';
import { vscDarkPlus, oneLight } from 'react-syntax-highlighter/dist/esm/styles/prism';

/**
 * LLMFinetuning — static, bilingual (zh/en) documentation page describing
 * CodeMariner's PEFT/LoRA fine-tuning workflow. Renders explanatory sections
 * plus a locale-specific Python code sample via react-syntax-highlighter.
 *
 * Fix vs. original: `isDarkMode` was computed inline from `window.matchMedia`
 * during render. A "use client" component is still evaluated on the server
 * (where `window` is undefined), so the server always rendered the light
 * syntax theme while a dark-preference client computed dark on hydration —
 * a React hydration mismatch. It also never reacted to OS theme changes.
 * Both are addressed by moving the media-query read into state + an effect.
 */
export default function LLMFinetuning() {
  const { language } = useLanguage();

  // Hydration-safe dark-mode detection: render light on the server and on the
  // first client paint, then sync with the OS preference after mount and keep
  // listening for subsequent changes.
  const [isDarkMode, setIsDarkMode] = React.useState(false);
  React.useEffect(() => {
    if (!window.matchMedia) return;
    const mql = window.matchMedia('(prefers-color-scheme: dark)');
    setIsDarkMode(mql.matches);
    const onChange = (e: MediaQueryListEvent) => setIsDarkMode(e.matches);
    mql.addEventListener('change', onChange);
    return () => mql.removeEventListener('change', onChange);
  }, []);

  // Display-only Python sample shown to zh-locale visitors.
  const pythonCodeZh = `# 安装必要的库
!pip install transformers peft datasets accelerate bitsandbytes

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer
from peft import get_peft_model, LoraConfig, prepare_model_for_kbit_training
from datasets import Dataset

# 1. 加载预训练模型（使用 CodeMariner 的基础模型）
model_id = "codemariner/diagram-base"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    load_in_8bit=True,
    device_map="auto",
    torch_dtype=torch.float16
)

# 2. 准备模型进行LoRA微调
model = prepare_model_for_kbit_training(model)

# 3. 定义LoRA配置（针对代码理解与文档生成任务优化）
peft_config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.1,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    bias="none",
    task_type="CAUSAL_LM"
)

# 4. 创建PEFT模型
model = get_peft_model(model, peft_config)

# 5. 准备多类型文档生成训练数据
diagram_data = [
    {"input": "分析以下代码库结构并生成系统架构图:\\n\`\`\`\\nsrc/\\n  components/\\n    Diagram.tsx\\n    NavBar.tsx\\n  lib/\\n    github.ts\\n    utils.ts\\n  app/\\n    page.tsx\\n    layout.tsx\\n\`\`\`", 
     "output": "\`\`\`mermaid\\ngraph TD\\n  A[前端应用] --> B[组件]\\n  A --> C[库]\\n  A --> D[页面]\\n  B --> B1[Diagram.tsx]\\n  B --> B2[NavBar.tsx]\\n  C --> C1[github.ts]\\n  C --> C2[utils.ts]\\n  D --> D1[page.tsx]\\n  D --> D2[layout.tsx]\\n\`\`\`"}
]

api_docs_data = [
    {"input": "为以下代码生成API文档:\\n\`\`\`typescript\\nexport async function fetchRepository(owner: string, repo: string): Promise<RepoData> {\\n  const response = await fetch(\`https://api.github.com/repos/\${owner}/\${repo}\`);\\n  if (!response.ok) throw new Error('Failed to fetch repository');\\n  return response.json();\\n}\\n\`\`\`", 
     "output": "## fetchRepository\\n\\n**功能**: 从GitHub API获取仓库信息\\n\\n**参数**:\\n- owner: string - 仓库所有者名称\\n- repo: string - 仓库名称\\n\\n**返回值**: Promise<RepoData> - 包含仓库数据的Promise\\n\\n**异常**: 如果API请求失败，抛出错误"}
]

code_comments_data = [
    {"input": "为以下代码生成详细注释:\\n\`\`\`typescript\\nfunction processDiagramData(repoStructure) {\\n  const nodes = [];\\n  const edges = [];\\n  \\n  for (const file of repoStructure.files) {\\n    nodes.push({ id: file.path, label: file.name });\\n    \\n    if (file.imports && file.imports.length > 0) {\\n      for (const imp of file.imports) {\\n        edges.push({ from: file.path, to: imp });\\n      }\\n    }\\n  }\\n  \\n  return { nodes, edges };\\n}\\n\`\`\`", 
     "output": "/**\\n * 处理仓库结构数据并转换为图表可用的节点和边缘数据\\n * \\n * @param {Object} repoStructure - 包含文件列表的仓库结构对象\\n * @param {Array} repoStructure.files - 仓库中的文件列表\\n * @returns {Object} 返回组织好的图表数据\\n * @returns {Array} nodes - 表示文件的节点数组\\n * @returns {Array} edges - 表示文件间依赖关系的边缘数组\\n *\\n * 函数逻辑：\\n * 1. 初始化空的节点和边缘数组\\n * 2. 遍历所有文件，为每个文件创建一个节点\\n * 3. 如果文件有导入依赖，则为每个依赖创建一条边\\n * 4. 返回组织好的节点和边缘数据\\n */"}
]

# 合并所有类型的训练数据
train_data = diagram_data + api_docs_data + code_comments_data

# 创建训练数据集
train_dataset = Dataset.from_dict({
    "input": [item["input"] for item in train_data],
    "output": [item["output"] for item in train_data]
})

# 6. 数据处理函数
def data_collator(features):
    inputs = [f["input"] for f in features]
    outputs = [f["output"] for f in features]
    
    # 格式化为指令格式
    prompts = [f"<指令>{inp}</指令>" for inp in inputs]
    targets = [f"<回复>{out}</回复>" for out in outputs]
    
    # 编码并设置标签
    inputs_ids = tokenizer(prompts, padding=True, truncation=True, return_tensors="pt").input_ids
    labels = tokenizer(targets, padding=True, truncation=True, return_tensors="pt").input_ids
    
    return {"input_ids": inputs_ids, "labels": labels}

# 7. 定义训练参数并开始训练
training_args = TrainingArguments(
    output_dir="./codemariner-finetuned",
    per_device_train_batch_size=8,
    gradient_accumulation_steps=1,
    learning_rate=2e-4,
    lr_scheduler_type="cosine",
    warmup_steps=50,
    max_steps=500,
    fp16=True,
    logging_steps=10,
    save_steps=50,
    save_total_limit=3,
    evaluation_strategy="steps",
    eval_steps=50,
)

# 8. 开始训练
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    data_collator=data_collator,
)
trainer.train()

# 9. 保存微调后的模型
model.save_pretrained("./codemariner-finetuned-final")

# 10. 使用微调模型生成多种文档
def generate_documentation(code_input, doc_type="diagram"):
    # 构建提示词
    if doc_type == "diagram":
        prompt = f"<指令>分析以下代码库结构并生成系统架构图:\\n{code_input}</指令>"
    elif doc_type == "api":
        prompt = f"<指令>为以下代码生成API文档:\\n{code_input}</指令>"
    elif doc_type == "comments":
        prompt = f"<指令>为以下代码生成详细注释:\\n{code_input}</指令>"
    
    # 编码输入
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    
    # 生成输出
    outputs = model.generate(
        inputs.input_ids,
        max_new_tokens=1024,
        temperature=0.7,
        top_p=0.9,
        do_sample=True
    )
    
    # 解码输出
    return tokenizer.decode(outputs[0], skip_special_tokens=True)`;

  // Display-only Python sample shown to en-locale visitors.
  const pythonCodeEn = `# Install necessary libraries
!pip install transformers peft datasets accelerate bitsandbytes

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer
from peft import get_peft_model, LoraConfig, prepare_model_for_kbit_training
from datasets import Dataset

# 1. Load the pre-trained model (using CodeMariner's base model)
model_id = "codemariner/diagram-base"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    load_in_8bit=True,
    device_map="auto",
    torch_dtype=torch.float16
)

# 2. Prepare the model for LoRA fine-tuning
model = prepare_model_for_kbit_training(model)

# 3. Define LoRA configuration (optimized for code understanding and documentation tasks)
peft_config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.1,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    bias="none",
    task_type="CAUSAL_LM"
)

# 4. Create PEFT model
model = get_peft_model(model, peft_config)

# 5. Prepare multi-type documentation training data
diagram_data = [
    {"input": "Analyze the following codebase structure and generate a system architecture diagram:\\n\`\`\`\\nsrc/\\n  components/\\n    Diagram.tsx\\n    NavBar.tsx\\n  lib/\\n    github.ts\\n    utils.ts\\n  app/\\n    page.tsx\\n    layout.tsx\\n\`\`\`", 
     "output": "\`\`\`mermaid\\ngraph TD\\n  A[Frontend App] --> B[Components]\\n  A --> C[Libraries]\\n  A --> D[Pages]\\n  B --> B1[Diagram.tsx]\\n  B --> B2[NavBar.tsx]\\n  C --> C1[github.ts]\\n  C --> C2[utils.ts]\\n  D --> D1[page.tsx]\\n  D --> D2[layout.tsx]\\n\`\`\`"}
]

api_docs_data = [
    {"input": "Generate API documentation for the following code:\\n\`\`\`typescript\\nexport async function fetchRepository(owner: string, repo: string): Promise<RepoData> {\\n  const response = await fetch(\`https://api.github.com/repos/\${owner}/\${repo}\`);\\n  if (!response.ok) throw new Error('Failed to fetch repository');\\n  return response.json();\\n}\\n\`\`\`", 
     "output": "## fetchRepository\\n\\n**Purpose**: Fetches repository information from the GitHub API\\n\\n**Parameters**:\\n- owner: string - The repository owner's username\\n- repo: string - The repository name\\n\\n**Returns**: Promise<RepoData> - A promise containing the repository data\\n\\n**Throws**: Error if the API request fails"}
]

code_comments_data = [
    {"input": "Generate detailed comments for the following code:\\n\`\`\`typescript\\nfunction processDiagramData(repoStructure) {\\n  const nodes = [];\\n  const edges = [];\\n  \\n  for (const file of repoStructure.files) {\\n    nodes.push({ id: file.path, label: file.name });\\n    \\n    if (file.imports && file.imports.length > 0) {\\n      for (const imp of file.imports) {\\n        edges.push({ from: file.path, to: imp });\\n      }\\n    }\\n  }\\n  \\n  return { nodes, edges };\\n}\\n\`\`\`", 
     "output": "/**\\n * Processes repository structure data and transforms it into nodes and edges for diagram rendering\\n * \\n * @param {Object} repoStructure - The repository structure object containing file list\\n * @param {Array} repoStructure.files - List of files in the repository\\n * @returns {Object} Returns organized diagram data\\n * @returns {Array} nodes - Array of nodes representing files\\n * @returns {Array} edges - Array of edges representing dependencies between files\\n *\\n * Function logic:\\n * 1. Initialize empty arrays for nodes and edges\\n * 2. Loop through all files and create a node for each file\\n * 3. If a file has imports, create an edge for each dependency\\n * 4. Return the organized node and edge data\\n */"}
]

# Combine all types of training data
train_data = diagram_data + api_docs_data + code_comments_data

# Create training dataset
train_dataset = Dataset.from_dict({
    "input": [item["input"] for item in train_data],
    "output": [item["output"] for item in train_data]
})

# 6. Data processing function
def data_collator(features):
    inputs = [f["input"] for f in features]
    outputs = [f["output"] for f in features]
    
    # Format as instruction format
    prompts = [f"<instruction>{inp}</instruction>" for inp in inputs]
    targets = [f"<response>{out}</response>" for out in outputs]
    
    # Encode and set labels
    inputs_ids = tokenizer(prompts, padding=True, truncation=True, return_tensors="pt").input_ids
    labels = tokenizer(targets, padding=True, truncation=True, return_tensors="pt").input_ids
    
    return {"input_ids": inputs_ids, "labels": labels}

# 7. Define training arguments and start training
training_args = TrainingArguments(
    output_dir="./codemariner-finetuned",
    per_device_train_batch_size=8,
    gradient_accumulation_steps=1,
    learning_rate=2e-4,
    lr_scheduler_type="cosine",
    warmup_steps=50,
    max_steps=500,
    fp16=True,
    logging_steps=10,
    save_steps=50,
    save_total_limit=3,
    evaluation_strategy="steps",
    eval_steps=50,
)

# 8. Start training
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    data_collator=data_collator,
)
trainer.train()

# 9. Save the fine-tuned model
model.save_pretrained("./codemariner-finetuned-final")

# 10. Use the fine-tuned model to generate various documentation
def generate_documentation(code_input, doc_type="diagram"):
    # Construct prompt
    if doc_type == "diagram":
        prompt = f"<instruction>Analyze the following codebase structure and generate a system architecture diagram:\\n{code_input}</instruction>"
    elif doc_type == "api":
        prompt = f"<instruction>Generate API documentation for the following code:\\n{code_input}</instruction>"
    elif doc_type == "comments":
        prompt = f"<instruction>Generate detailed comments for the following code:\\n{code_input}</instruction>"
    
    # Encode input
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    
    # Generate output
    outputs = model.generate(
        inputs.input_ids,
        max_new_tokens=1024,
        temperature=0.7,
        top_p=0.9,
        do_sample=True
    )
    
    # Decode output
    return tokenizer.decode(outputs[0], skip_special_tokens=True)`;

  return (
    <div className="relative">
      {/* Back-to-home navigation */}
      <div className="absolute top-4 left-4">
        <Link href="/" className="inline-flex items-center rounded-md bg-blue-600 px-4 py-2 text-white hover:bg-blue-700 transition-colors">
          <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round" strokeLinejoin="round" className="mr-2">
            <path d="M15 18l-6-6 6-6" />
          </svg>
          {language === 'zh' ? '返回首页' : 'Back to Home'}
        </Link>
      </div>
      
      <div className="container mx-auto max-w-4xl px-4 py-8">
        <h1 className="mb-8 text-center text-3xl font-bold">
          {language === 'zh' ? 'CodeMariner 大模型代码理解与文档生成' : 'CodeMariner LLM Code Understanding & Documentation'}
        </h1>
        
        {/* Intro banner */}
        <div className="mb-10 rounded-lg border border-blue-100 bg-blue-50/30 p-6 dark:border-blue-900/30 dark:bg-blue-900/10">
          <div className="mb-4 flex items-center text-blue-600 dark:text-blue-400">
            <svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round" strokeLinejoin="round" className="mr-2">
              <path d="M12 2v8"/>
              <path d="m4.93 10.93 1.41 1.41"/>
              <path d="M2 18h2"/>
              <path d="M20 18h2"/>
              <path d="m19.07 10.93-1.41 1.41"/>
              <path d="M22 22H2"/>
              <path d="m8 22 4-10 4 10"/>
              <path d="M12 14v4"/>
            </svg>
            <h2 className="text-xl font-semibold">
              {language === 'zh' ? '深度代码理解：生成图表与多类型文档' : 'Deep Code Understanding: Diagrams & Multi-type Documentation'}
            </h2>
          </div>
          <p className="text-gray-700 dark:text-gray-300">
            {language === 'zh' 
              ? 'CodeMariner提供了一套实用的大模型微调方法，使您能够训练专属的AI模型，不仅生成精确的代码图表，还能生成API文档、代码注释、使用指南等多类型文档。通过增强AI对代码结构的理解能力，让您的团队更高效地开发和维护项目。' 
              : 'CodeMariner provides practical LLM fine-tuning methods that enable you to train dedicated AI models that not only generate precise code diagrams but also produce various types of documentation such as API references, code comments, user guides, and more. By enhancing AI\'s ability to understand code structures, your team can develop and maintain projects more efficiently.'}
          </p>
        </div>

        {/* Motivation: why fine-tune / benefits */}
        <div className="mb-8 grid gap-8 md:grid-cols-2">
          <div className="rounded-lg border border-gray-200 bg-white p-6 shadow-sm dark:border-gray-800 dark:bg-gray-900">
            <h3 className="mb-4 text-lg font-medium text-blue-600 dark:text-blue-400">
              {language === 'zh' ? '为什么要微调模型？' : 'Why Fine-tune Models?'}
            </h3>
            <div className="space-y-4 text-gray-700 dark:text-gray-300">
              <p>
                {language === 'zh' 
                  ? '通用大语言模型虽然功能强大，但针对特定领域的代码理解和文档生成还存在局限性：' 
                  : 'General large language models are powerful, but have limitations when it comes to specialized code understanding and documentation:'}
              </p>
              <ul className="space-y-2 pl-5 list-disc">
                <li>
                  {language === 'zh' 
                    ? '难以理解您团队特有的代码模式、命名约定和架构风格' 
                    : 'Difficulty understanding your team\'s unique code patterns, naming conventions, and architectural styles'}
                </li>
                <li>
                  {language === 'zh' 
                    ? '生成的文档和图表可能不符合行业或公司标准规范' 
                    : 'Generated documentation and diagrams may not comply with industry or company standard specifications'}
                </li>
                <li>
                  {language === 'zh' 
                    ? '对特定框架、库或领域知识的理解有限' 
                    : 'Limited understanding of specific frameworks, libraries, or domain knowledge'}
                </li>
                <li>
                  {language === 'zh' 
                    ? '无法自动生成符合团队风格的多类型技术文档' 
                    : 'Inability to automatically generate multiple types of technical documentation matching your team\'s style'}
                </li>
              </ul>
            </div>
          </div>

          <div className="rounded-lg border border-gray-200 bg-white p-6 shadow-sm dark:border-gray-800 dark:bg-gray-900">
            <h3 className="mb-4 text-lg font-medium text-blue-600 dark:text-blue-400">
              {language === 'zh' ? '微调的好处' : 'Benefits of Fine-tuning'}
            </h3>
            <div className="space-y-4 text-gray-700 dark:text-gray-300">
              <ul className="space-y-2 pl-5 list-disc">
                <li>
                  {language === 'zh' 
                    ? '增强代码理解准确率，适应特定技术栈和领域知识' 
                    : 'Enhanced code comprehension accuracy, adapting to specific tech stacks and domain knowledge'}
                </li>
                <li>
                  {language === 'zh' 
                    ? '生成符合团队风格的多种类型文档和图表' 
                    : 'Generate various types of documentation and diagrams that match your team\'s style'}
                </li>
                <li>
                  {language === 'zh' 
                    ? '自动识别项目特定的设计模式、架构和关键组件' 
                    : 'Automatically identify project-specific design patterns, architecture, and key components'}
                </li>
                <li>
                  {language === 'zh' 
                    ? '提高文档一致性，减少手动编写和维护文档的工作量' 
                    : 'Improve documentation consistency while reducing manual documentation writing and maintenance workload'}
                </li>
                <li>
                  {language === 'zh' 
                    ? '加速新团队成员的入职和知识传递过程' 
                    : 'Accelerate onboarding of new team members and knowledge transfer processes'}
                </li>
              </ul>
            </div>
          </div>
        </div>

        {/* Step-by-step PEFT implementation guide */}
        <div className="mb-8 rounded-lg border border-gray-200 bg-white p-6 shadow-sm dark:border-gray-800 dark:bg-gray-900">
          <h3 className="mb-4 text-lg font-medium text-blue-600 dark:text-blue-400">
            {language === 'zh' ? 'PEFT 微调方法实施指南' : 'PEFT Fine-tuning Implementation Guide'}
          </h3>
          <div className="space-y-6">
            <p className="text-gray-700 dark:text-gray-300">
              {language === 'zh' 
                ? 'CodeMariner 使用参数高效微调技术（Parameter-Efficient Fine-Tuning，PEFT）来优化大模型性能，无需大量计算资源即可实现良好效果。' 
                : 'CodeMariner uses Parameter-Efficient Fine-Tuning (PEFT) techniques to optimize large language model performance without requiring extensive computational resources.'}
            </p>
            
            <div className="flex items-start">
              <div className="flex h-8 w-8 items-center justify-center rounded-full bg-blue-100 text-blue-600 dark:bg-blue-900/30 dark:text-blue-400 mr-4">
                <span>1</span>
              </div>
              <div>
                <h4 className="mb-1 font-medium">
                  {language === 'zh' ? 'LoRA 适配器设置' : 'LoRA Adapter Setup'}
                </h4>
                <p className="text-gray-700 dark:text-gray-300">
                  {language === 'zh' 
                    ? '低秩适配器（LoRA）是一种高效的微调方法，通过训练小型适配器来修改大模型的行为。在 CodeMariner 中，我们应用以下设置：' 
                    : 'Low-Rank Adaptation (LoRA) is an efficient fine-tuning method that modifies large model behavior by training small adapters. In CodeMariner, we apply the following settings:'}
                </p>
                <div className="mt-2 p-3 bg-gray-50 dark:bg-gray-800 rounded-md font-mono text-sm">
                  <code>
                    r=8<br/>
                    alpha=16<br/>
                    dropout=0.1<br/>
                    target_modules=[&quot;q_proj&quot;, &quot;k_proj&quot;, &quot;v_proj&quot;, &quot;o_proj&quot;]
                  </code>
                </div>
              </div>
            </div>
            
            <div className="flex items-start">
              <div className="flex h-8 w-8 items-center justify-center rounded-full bg-blue-100 text-blue-600 dark:bg-blue-900/30 dark:text-blue-400 mr-4">
                <span>2</span>
              </div>
              <div>
                <h4 className="mb-1 font-medium">
                  {language === 'zh' ? '多功能训练数据准备' : 'Multi-purpose Training Data Preparation'}
                </h4>
                <p className="text-gray-700 dark:text-gray-300 mb-2">
                  {language === 'zh' 
                    ? '创建包含您代码库结构和多种文档输出的指令调优数据集：' 
                    : 'Create an instruction-tuning dataset containing your codebase structure and various documentation outputs:'}
                </p>
                <div className="bg-gray-50 dark:bg-gray-800 rounded-md p-3 font-mono text-sm">
                  {language === 'zh' 
                    ? '// 图表生成样例\n{\n  "input": "分析以下代码库结构并生成系统架构图:\\n{代码库结构}",\n  "output": "{理想的Mermaid图表代码}"\n}\n\n// API文档生成样例\n{\n  "input": "为以下代码生成API文档:\\n{API代码}",\n  "output": "{详细的API文档}"\n}'
                    : '// Diagram generation example\n{\n  "input": "Analyze the following codebase structure and generate a system architecture diagram:\\n{codebase structure}",\n  "output": "{ideal Mermaid diagram code}"\n}\n\n// API documentation example\n{\n  "input": "Generate API documentation for the following code:\\n{API code}",\n  "output": "{detailed API documentation}"\n}'}
                </div>
                <p className="text-gray-700 dark:text-gray-300 mt-2">
                  {language === 'zh' 
                    ? '推荐为每种文档类型（架构图、API文档、代码注释、使用指南等）准备10-50个样本。' 
                    : 'We recommend preparing 10-50 samples for each documentation type (architecture diagrams, API docs, code comments, user guides, etc.).'}
                </p>
              </div>
            </div>
            
            <div className="flex items-start">
              <div className="flex h-8 w-8 items-center justify-center rounded-full bg-blue-100 text-blue-600 dark:bg-blue-900/30 dark:text-blue-400 mr-4">
                <span>3</span>
              </div>
              <div>
                <h4 className="mb-1 font-medium">
                  {language === 'zh' ? '批量大小与学习率优化' : 'Batch Size and Learning Rate Optimization'}
                </h4>
                <p className="text-gray-700 dark:text-gray-300">
                  {language === 'zh' 
                    ? '根据我们的实验，以下参数对代码理解和文档生成任务最有效：' 
                    : 'Based on our experiments, the following parameters are most effective for code understanding and documentation tasks:'}
                </p>
                <div className="mt-2 p-3 bg-gray-50 dark:bg-gray-800 rounded-md font-mono text-sm">
                  <code>
                    batch_size=8<br/>
                    learning_rate=2e-4<br/>
                    warmup_steps=50<br/>
                    max_steps=500
                  </code>
                </div>
              </div>
            </div>
            
            <div className="flex items-start">
              <div className="flex h-8 w-8 items-center justify-center rounded-full bg-blue-100 text-blue-600 dark:bg-blue-900/30 dark:text-blue-400 mr-4">
                <span>4</span>
              </div>
              <div>
                <h4 className="mb-1 font-medium">
                  {language === 'zh' ? '评估与模型选择' : 'Evaluation and Model Selection'}
                </h4>
                <p className="text-gray-700 dark:text-gray-300">
                  {language === 'zh' 
                    ? '使用以下自定义评价指标来选择最佳模型：' 
                    : 'Use the following custom evaluation metrics to select the best model:'}
                </p>
                <ul className="mt-2 space-y-1 pl-5 list-disc text-gray-700 dark:text-gray-300">
                  <li>
                    {language === 'zh' 
                      ? '代码理解深度：模型对代码结构和功能的理解程度' 
                      : 'Code Comprehension Depth: How well the model understands code structure and functionality'}
                  </li>
                  <li>
                    {language === 'zh' 
                      ? '文档准确性：生成文档与代码实际功能的符合度' 
                      : 'Documentation Accuracy: How well generated docs match actual code functionality'}
                  </li>
                  <li>
                    {language === 'zh' 
                      ? '图表完整性：架构图组件识别率和关系捕捉率' 
                      : 'Diagram Completeness: Architecture component identification and relationship capture rates'}
                  </li>
                  <li>
                    {language === 'zh' 
                      ? '文档风格一致性：与团队现有文档风格的匹配度' 
                      : 'Style Consistency: Match with team\'s existing documentation style'}
                  </li>
                </ul>
              </div>
            </div>
          </div>
        </div>

        {/* Syntax-highlighted code sample; theme follows isDarkMode state */}
        <div className="mb-8 rounded-lg border border-gray-200 bg-white p-6 shadow-sm dark:border-gray-800 dark:bg-gray-900">
          <h3 className="mb-4 text-lg font-medium text-blue-600 dark:text-blue-400">
            {language === 'zh' ? '代码示例：使用 PEFT 和 LoRA 进行微调' : 'Code Example: Using PEFT and LoRA for Fine-tuning'}
          </h3>
          <div className="overflow-auto">
            <div className="rounded-md">
              <SyntaxHighlighter
                language="python"
                style={isDarkMode ? vscDarkPlus : oneLight}
                showLineNumbers={true}
                customStyle={{
                  margin: 0,
                  padding: '1rem',
                  borderRadius: '0.375rem',
                  fontSize: '0.875rem',
                }}
              >
                {language === 'zh' ? pythonCodeZh : pythonCodeEn}
              </SyntaxHighlighter>
            </div>
          </div>
        </div>

        {/* Best-practices checklist */}
        <div className="rounded-lg border border-cyan-100 bg-cyan-50/30 p-6 dark:border-cyan-900/30 dark:bg-cyan-900/10">
          <h3 className="mb-4 flex items-center text-cyan-600 dark:text-cyan-400 text-lg font-medium">
            <svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round" strokeLinejoin="round" className="mr-2">
              <path d="M12 22s8-4 8-10V5l-8-3-8 3v7c0 6 8 10 8 10z" />
            </svg>
            {language === 'zh' ? '最佳实践与提示' : 'Best Practices and Tips'}
          </h3>
          <ul className="space-y-3 text-gray-700 dark:text-gray-300">
            <li className="flex items-start">
              <span className="mr-2 text-cyan-500">•</span>
              <span>
                {language === 'zh' 
                  ? '确保训练数据包含各种代码结构和文档类型，以增强模型的通用性和适应性' 
                  : 'Ensure training data includes various code structures and document types to enhance model versatility and adaptability'}
              </span>
            </li>
            <li className="flex items-start">
              <span className="mr-2 text-cyan-500">•</span>
              <span>
                {language === 'zh' 
                  ? '针对不同编程语言创建专门的训练集，提高多语言代码理解能力' 
                  : 'Create specialized training sets for different programming languages to improve multilingual code understanding'}
              </span>
            </li>
            <li className="flex items-start">
              <span className="mr-2 text-cyan-500">•</span>
              <span>
                {language === 'zh' 
                  ? '使用验证集定期评估模型在不同文档类型上的性能，避免能力不平衡' 
                  : 'Use validation sets to regularly evaluate model performance across different document types to avoid capability imbalance'}
              </span>
            </li>
            <li className="flex items-start">
              <span className="mr-2 text-cyan-500">•</span>
              <span>
                {language === 'zh' 
                  ? '与开发团队合作收集实际文档反馈，持续改进微调过程' 
                  : 'Collaborate with development teams to collect real document feedback and continuously improve the fine-tuning process'}
              </span>
            </li>
            <li className="flex items-start">
              <span className="mr-2 text-cyan-500">•</span>
              <span>
                {language === 'zh' 
                  ? '考虑使用多任务学习方法，同时优化图表生成和文档创建能力' 
                  : 'Consider multi-task learning approaches to simultaneously optimize diagram generation and documentation creation capabilities'}
              </span>
            </li>
          </ul>
        </div>
      </div>
    </div>
  );
}