import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
import json
import os
import numpy as np
from transformers import GPT2Tokenizer
import pickle

class SimpleAIModel(nn.Module):
    """A small GPT-style conversational language model.

    Token embeddings plus learned positional embeddings feed a stack of
    ``nn.TransformerEncoderLayer`` blocks, followed by a final LayerNorm
    and a linear projection back onto the vocabulary.
    """

    def __init__(self, vocab_size=50257, embed_dim=256, hidden_dim=512, num_layers=2, max_seq_len=128):
        """Build the model.

        Args:
            vocab_size: size of the token vocabulary (default matches GPT-2).
            embed_dim: embedding / model width; must be divisible by the
                fixed number of attention heads (8).
            hidden_dim: inner dimension of each block's feed-forward layer.
            num_layers: number of transformer encoder layers.
            max_seq_len: longest sequence the positional embedding table
                can address.
        """
        super(SimpleAIModel, self).__init__()

        self.vocab_size = vocab_size
        self.embed_dim = embed_dim
        self.hidden_dim = hidden_dim
        self.max_seq_len = max_seq_len

        # Token and learned positional embeddings.
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.pos_embedding = nn.Embedding(max_seq_len, embed_dim)

        # Stack of standard transformer encoder layers (batch-first).
        self.transformer_blocks = nn.ModuleList([
            nn.TransformerEncoderLayer(
                d_model=embed_dim,
                nhead=8,
                dim_feedforward=hidden_dim,
                dropout=0.1,
                batch_first=True
            ) for _ in range(num_layers)
        ])

        # Final normalization and projection to vocabulary logits.
        self.ln_f = nn.LayerNorm(embed_dim)
        self.output = nn.Linear(embed_dim, vocab_size)

        self._init_weights()

    def _init_weights(self):
        """Initialize all Linear/Embedding weights with N(0, 0.02).

        Note this also re-initializes the Linear layers *inside* the
        transformer blocks, overriding PyTorch's defaults.
        """
        for module in self.modules():
            if isinstance(module, nn.Linear):
                torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
                if module.bias is not None:
                    torch.nn.init.zeros_(module.bias)
            elif isinstance(module, nn.Embedding):
                torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)

    def forward(self, input_ids, attention_mask=None):
        """Compute next-token logits.

        Args:
            input_ids: LongTensor of shape (batch, seq_len), seq_len <= max_seq_len.
            attention_mask: optional (batch, seq_len) tensor where nonzero
                marks real tokens; converted to a key-padding mask.

        Returns:
            FloatTensor of logits with shape (batch, seq_len, vocab_size).
        """
        batch_size, seq_len = input_ids.shape

        # Position ids 0..seq_len-1, broadcast over the batch.
        pos_ids = torch.arange(0, seq_len, dtype=torch.long, device=input_ids.device)
        pos_ids = pos_ids.unsqueeze(0).expand(batch_size, -1)

        # Sum of token and positional embeddings.
        token_embeddings = self.embedding(input_ids)
        pos_embeddings = self.pos_embedding(pos_ids)
        hidden_states = token_embeddings + pos_embeddings

        # Transformer stack; key-padding mask expects True at PAD positions,
        # hence the inversion of attention_mask.
        for transformer in self.transformer_blocks:
            hidden_states = transformer(hidden_states, src_key_padding_mask=~attention_mask.bool() if attention_mask is not None else None)

        hidden_states = self.ln_f(hidden_states)
        logits = self.output(hidden_states)

        return logits

    def generate(self, tokenizer, prompt, max_length=50, temperature=0.8, top_k=50):
        """Autoregressively sample a reply to *prompt*.

        Args:
            tokenizer: object providing ``encode``/``decode`` and
                ``eos_token_id`` (e.g. a HuggingFace tokenizer).
            prompt: text to condition on.
            max_length: maximum number of *new* tokens to sample.
            temperature: softmax temperature (> 0); lower is greedier.
            top_k: keep only the k most likely tokens per step (0 disables).

        Returns:
            The generated continuation as a string (prompt excluded).
        """
        self.eval()

        input_ids = tokenizer.encode(prompt, return_tensors='pt')
        generated = []  # ids of newly sampled tokens only (prompt excluded)

        with torch.no_grad():
            for _ in range(max_length):
                # Keep only the most recent max_seq_len tokens: the
                # positional table cannot address longer inputs.
                if input_ids.shape[1] > self.max_seq_len:
                    input_ids = input_ids[:, -self.max_seq_len:]

                logits = self.forward(input_ids)
                next_token_logits = logits[0, -1, :] / temperature

                # Top-k filtering: mask everything below the k-th logit.
                # Clamp k so a k larger than the vocabulary cannot crash topk.
                if top_k > 0:
                    k = min(top_k, next_token_logits.size(-1))
                    top_k_logits, _ = torch.topk(next_token_logits, k)
                    next_token_logits[next_token_logits < top_k_logits[-1]] = -float('Inf')

                # Sample the next token from the filtered distribution.
                probabilities = torch.softmax(next_token_logits, dim=-1)
                next_token = torch.multinomial(probabilities, 1)

                # Stop on end-of-sequence.
                if next_token.item() == tokenizer.eos_token_id:
                    break

                generated.append(next_token.item())
                input_ids = torch.cat([input_ids, next_token.view(1, 1)], dim=1)

        # BUGFIX: decode only the newly generated tokens. The original
        # decoded the whole sequence and sliced off len(prompt) characters,
        # which breaks whenever decode(encode(prompt)) != prompt and whenever
        # the context-window truncation above drops prompt tokens.
        return tokenizer.decode(generated, skip_special_tokens=True).strip()

class ConversationDataset(Dataset):
    """Dataset of single-turn Q/A pairs for language-model training.

    Each item renders one pair as a "问：<input> 答：<output>" string,
    encodes it to a fixed length, and builds next-token-prediction labels
    (the input shifted left by one position).
    """

    def __init__(self, data_path, tokenizer, max_length=128):
        """Load (or seed) the conversation file.

        Args:
            data_path: JSON file with a top-level "conversations" list of
                {"input", "output"} dicts; created with built-in sample
                data when it does not exist.
            tokenizer: tokenizer providing ``encode`` and ``pad_token_id``.
            max_length: fixed token length each sample is padded/truncated to.
        """
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.conversations = self._load_data(data_path)

    def _load_data(self, data_path):
        """Return the conversation list, writing sample data if the file is missing."""
        if os.path.exists(data_path):
            with open(data_path, 'r', encoding='utf-8') as f:
                return json.load(f).get('conversations', [])

        # File missing: fall back to a small built-in sample set so that
        # training can run out of the box.
        conversations = [
            {"input": "你好", "output": "你好！我是AI助手，有什么可以帮助你的吗？"},
            {"input": "你是谁", "output": "我是一个AI助手，可以回答问题和进行对话。"},
            {"input": "今天天气怎么样", "output": "抱歉，我无法获取实时天气信息，建议你查看天气预报。"},
            {"input": "谢谢你", "output": "不客气！很高兴能帮助到你。"},
            {"input": "再见", "output": "再见！祝你有美好的一天！"}
        ]

        # BUGFIX: only create data_path's own parent directory. The original
        # also created an unrelated './data' directory whenever data_path
        # had no directory component.
        data_dir = os.path.dirname(data_path)
        if data_dir:
            os.makedirs(data_dir, exist_ok=True)
        with open(data_path, 'w', encoding='utf-8') as f:
            json.dump({"conversations": conversations}, f, ensure_ascii=False, indent=2)

        return conversations

    def __len__(self):
        return len(self.conversations)

    def __getitem__(self, idx):
        """Return input_ids, shifted labels and attention mask for item *idx*."""
        conv = self.conversations[idx]

        # Render the Q/A pair as one training string.
        text = f"问：{conv['input']} 答：{conv['output']}"

        encoded = self.tokenizer.encode(text, max_length=self.max_length,
                                      truncation=True, padding='max_length',
                                      return_tensors='pt')

        input_ids = encoded.squeeze()

        # Labels are the inputs shifted left by one (predict the next token);
        # the final position has no successor and is filled with pad.
        # NOTE(review): padded positions are NOT masked to -100, so a standard
        # cross-entropy loss will also train on padding — confirm against the
        # training loop.
        labels = input_ids.clone()
        labels[:-1] = input_ids[1:]
        labels[-1] = self.tokenizer.pad_token_id

        return {
            'input_ids': input_ids,
            'labels': labels,
            'attention_mask': (input_ids != self.tokenizer.pad_token_id).long()
        }

def save_model(model, tokenizer, save_dir):
    """Persist model weights, architecture config and (if possible) the tokenizer.

    Args:
        model: a SimpleAIModel-like module exposing ``vocab_size``,
            ``embed_dim``, ``hidden_dim``, ``transformer_blocks`` and
            ``max_seq_len``.
        tokenizer: tokenizer saved alongside the model; skipped when it is
            None or lacks ``save_pretrained``.
        save_dir: output directory (created if missing).
    """
    os.makedirs(save_dir, exist_ok=True)

    # Save only the state_dict, matching what load_model expects.
    torch.save(model.state_dict(), os.path.join(save_dir, 'model.pth'))

    # Enough hyper-parameters to rebuild the architecture on load.
    config = {
        'vocab_size': model.vocab_size,
        'embed_dim': model.embed_dim,
        'hidden_dim': model.hidden_dim,
        'num_layers': len(model.transformer_blocks),
        'max_seq_len': model.max_seq_len
    }
    with open(os.path.join(save_dir, 'config.json'), 'w') as f:
        json.dump(config, f, indent=2)

    # BUGFIX: the original accepted a tokenizer but never saved it despite
    # the docstring's promise; save it when the tokenizer supports it.
    if tokenizer is not None and hasattr(tokenizer, 'save_pretrained'):
        tokenizer.save_pretrained(save_dir)

    print(f"模型已保存到: {save_dir}")

def load_model(save_dir, device='cpu'):
    """Reconstruct a SimpleAIModel and a GPT-2 tokenizer from *save_dir*.

    Args:
        save_dir: directory previously populated by ``save_model``
            (must contain ``config.json`` and ``model.pth``).
        device: torch device string the weights are mapped onto.

    Returns:
        A ``(model, tokenizer)`` tuple with the model moved to *device*.
    """
    # Rebuild the architecture from the saved hyper-parameters.
    config_path = os.path.join(save_dir, 'config.json')
    with open(config_path, 'r') as f:
        config = json.load(f)
    model = SimpleAIModel(**config)

    # NOTE(review): torch.load unpickles the checkpoint — only load files
    # from trusted sources.
    weights_path = os.path.join(save_dir, 'model.pth')
    state_dict = torch.load(weights_path, map_location=device)
    model.load_state_dict(state_dict)
    model.to(device)

    # GPT-2 ships without a pad token; reuse EOS so padding works.
    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    print(f"模型已从 {save_dir} 加载")
    return model, tokenizer