import ast
import math
import os
import random
import shutil
import urllib.request
import warnings
import zipfile

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
from tqdm import tqdm

warnings.filterwarnings("ignore")
# Seed every RNG source up front so runs are reproducible.
def set_seed(seed=42):
    """Seed Python, NumPy and PyTorch (CPU + all CUDA devices) RNGs."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

set_seed()

# Device selection: prefer GPU when available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")

# 1. Dataset definition (standard PyTorch Dataset pipeline)
class CornellMovieDialogDataset(Dataset):
    """Cornell Movie-Dialogs dataset wrapped as a PyTorch ``Dataset``.

    Each sample is one multi-turn conversation flattened into a single
    token-id sequence (<SOS> t1 <SEP> t2 ... <EOS>), set up for
    next-token (autoregressive) training.
    """

    def __init__(self, conversations, vocab, tokenizer, max_len=50):
        self.conversations = conversations  # format: [(turn 1, turn 2, ...), ...]
        self.vocab = vocab
        self.tokenizer = tokenizer  # callable: str -> list of tokens
        self.max_len = max_len
        self.pad_idx = vocab["<PAD>"]
        self.sos_idx = vocab["<SOS>"]
        self.eos_idx = vocab["<EOS>"]
        # Pre-convert every conversation into an id sequence once.
        self.samples = self._prepare_samples()

    def _prepare_samples(self):
        """Turn each conversation into one <SOS> ... <EOS> id sequence."""
        samples = []
        for conv in self.conversations:
            # Interleave the turns with the <SEP> separator token.
            merged = []
            for i, turn in enumerate(conv):
                if i:
                    merged.append("<SEP>")
                merged.extend(self.tokenizer(turn.lower()))
            # NOTE: tokens missing from the vocab are silently dropped here.
            ids = [self.vocab[tok] for tok in merged if tok in self.vocab]
            ids = [self.sos_idx, *ids, self.eos_idx]
            if len(ids) >= 2:  # keep only non-degenerate samples
                samples.append(ids)
        return samples

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        ids = self.samples[idx]
        # Shift-by-one: the model predicts token t+1 from tokens <= t.
        src = ids[:-1][: self.max_len]
        tgt = ids[1:][: self.max_len]
        # Right-pad both sequences out to max_len.
        pad = self.pad_idx
        src = src + [pad] * (self.max_len - len(src))
        tgt = tgt + [pad] * (self.max_len - len(tgt))
        return {
            "input": torch.tensor(src, dtype=torch.long),
            "target": torch.tensor(tgt, dtype=torch.long),
        }

def download_cornell_movie_dialogs(data_dir="cornell_movie_dialogs"):
    """Download and unpack the Cornell Movie-Dialogs corpus.

    Official source:
    https://www.cs.cornell.edu/~cristian/data/cornell_movie_dialogs_corpus.zip

    Returns the directory containing the extracted corpus files. Both the
    download and the extraction are skipped when already present on disk.
    """
    os.makedirs(data_dir, exist_ok=True)
    zip_path = os.path.join(data_dir, "cornell_movie_dialogs_corpus.zip")

    # Fetch the archive only once.
    if not os.path.exists(zip_path):
        print("正在下载Cornell Movie-Dialogs数据集...")
        url = "https://www.cs.cornell.edu/~cristian/data/cornell_movie_dialogs_corpus.zip"
        urllib.request.urlretrieve(url, zip_path)
        print("下载完成")

    extract_dir = os.path.join(data_dir, "extracted")
    if not os.path.exists(extract_dir):
        with zipfile.ZipFile(zip_path, 'r') as archive:
            archive.extractall(extract_dir)
        # Flatten the archive's single top-level folder into extract_dir.
        nested = os.path.join(extract_dir, "cornell movie-dialogs corpus")
        for entry in os.listdir(nested):
            shutil.move(os.path.join(nested, entry), os.path.join(extract_dir, entry))

    return extract_dir

def load_cornell_conversations(data_dir):
    """Load multi-turn conversations from the extracted corpus.

    Args:
        data_dir: Directory containing ``movie_lines.txt`` and
            ``movie_conversations.txt``.

    Returns:
        A list of conversations; each conversation is a list of at least
        two utterance strings in dialogue order.
    """
    # Map each line id (e.g. "L1045") to its utterance text.
    lines = {}
    lines_path = os.path.join(data_dir, "movie_lines.txt")
    with open(lines_path, encoding="iso-8859-1") as f:
        for line in f:
            parts = line.strip().split(" +++$+++ ")
            if len(parts) == 5:
                line_id, _, _, _, text = parts
                lines[line_id] = text.strip()

    # Each conversation record ends with a Python-literal list of line ids.
    conversations = []
    conv_path = os.path.join(data_dir, "movie_conversations.txt")
    with open(conv_path, encoding="iso-8859-1") as f:
        for line in f:
            parts = line.strip().split(" +++$+++ ")
            if len(parts) == 4:
                _, _, _, line_ids = parts
                # literal_eval is safe on untrusted file content, unlike eval().
                try:
                    line_ids = ast.literal_eval(line_ids)
                except (ValueError, SyntaxError):
                    continue  # skip malformed records instead of crashing
                # Resolve line ids to text, dropping any missing references.
                conv = [lines[line_id] for line_id in line_ids if line_id in lines]
                if len(conv) >= 2:  # keep only genuine multi-turn dialogues
                    conversations.append(conv)

    print(f"成功加载 {len(conversations)} 个多轮对话")
    return conversations

# 2. Vocabulary construction (torchtext utilities)
def build_vocabulary(conversations, tokenizer):
    """Build a token vocabulary via torchtext's build_vocab_from_iterator."""

    def token_stream(convs):
        # Yield one token list per dialogue turn, plus the turn separator.
        for conv in convs:
            for turn in conv:
                yield tokenizer(turn.lower())
            yield ["<SEP>"]

    vocab = build_vocab_from_iterator(
        token_stream(conversations),
        min_freq=5,  # drop rare words
        specials=["<PAD>", "<UNK>", "<SOS>", "<EOS>", "<SEP>"],
        special_first=True,  # specials occupy the lowest indices
    )
    # Any out-of-vocabulary lookup resolves to <UNK>.
    vocab.set_default_index(vocab["<UNK>"])
    print(f"词汇表大小: {len(vocab)}")
    return vocab

# 3. Model definition (built on PyTorch's nn.TransformerDecoder)
class DecoderOnlyDialogModel(nn.Module):
    """Decoder-only (GPT-style) dialogue language model.

    The nn.TransformerDecoder stack is used in a self-attention-only
    fashion: the input sequence serves as both ``tgt`` and ``memory``,
    with a causal mask on both paths so no position can attend to a
    future token.
    """

    def __init__(self, vocab_size, pad_idx, d_model=256, nhead=8, num_layers=4,
                 dim_feedforward=512, max_len=50, dropout=0.1):
        super().__init__()
        self.d_model = d_model
        self.vocab_size = vocab_size
        self.pad_idx = pad_idx  # used to build key-padding masks

        # Token embedding
        self.embedding = nn.Embedding(vocab_size, d_model)

        # Sinusoidal positional encoding. Registered as a NON-persistent
        # buffer: it follows .to(device) automatically but stays out of
        # the state_dict, so checkpoints saved by earlier versions of
        # this file still load cleanly.
        self.register_buffer(
            "pos_encoder",
            self._create_positional_encoding(max_len, d_model),
            persistent=False,
        )

        # Stacked Transformer decoder layers (built-in)
        decoder_layer = nn.TransformerDecoderLayer(
            d_model=d_model,
            nhead=nhead,
            dim_feedforward=dim_feedforward,
            dropout=dropout,
            batch_first=True,  # inputs are (batch, seq, d_model)
        )
        self.transformer_decoder = nn.TransformerDecoder(
            decoder_layer=decoder_layer,
            num_layers=num_layers,
        )

        # Projection back to vocabulary logits
        self.fc_out = nn.Linear(d_model, vocab_size)

        self._init_weights()

    @staticmethod
    def _create_positional_encoding(max_len, d_model):
        """Return the standard sinusoidal encoding, shape (1, max_len, d_model)."""
        position = torch.arange(max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        pe = torch.zeros(max_len, 1, d_model)
        pe[:, 0, 0::2] = torch.sin(position * div_term)
        pe[:, 0, 1::2] = torch.cos(position * div_term)
        return pe.transpose(0, 1)  # (1, max_len, d_model)

    def _init_weights(self):
        """Uniform init for the embedding and the output projection."""
        initrange = 0.1
        self.embedding.weight.data.uniform_(-initrange, initrange)
        self.fc_out.bias.data.zero_()
        self.fc_out.weight.data.uniform_(-initrange, initrange)

    @staticmethod
    def _generate_mask(seq_len, device=None):
        """Causal mask: -inf strictly above the diagonal, 0 elsewhere."""
        full = torch.full((seq_len, seq_len), float("-inf"), device=device)
        return torch.triu(full, diagonal=1)

    def forward(self, x):
        """Compute next-token logits.

        Args:
            x: LongTensor of token ids, shape (batch, seq_len).

        Returns:
            FloatTensor of logits, shape (batch, seq_len, vocab_size).
        """
        seq_len = x.size(1)

        # Scaled embedding + positional encoding (buffer lives on the
        # module's device already, no per-call .to() needed).
        x_emb = self.embedding(x) * math.sqrt(self.d_model)
        x_emb = x_emb + self.pos_encoder[:, :seq_len, :]

        # Causal mask for both attention paths, plus a key-padding mask
        # so <PAD> positions are never attended to.
        causal_mask = self._generate_mask(seq_len, device=x.device)
        padding_mask = x == self.pad_idx

        # Decoder-only trick: the sequence is its own memory. The
        # memory_key_padding_mask was previously missing, which let PAD
        # tokens leak attention through the cross-attention path.
        output = self.transformer_decoder(
            tgt=x_emb,
            memory=x_emb,
            tgt_mask=causal_mask,
            memory_mask=causal_mask,
            tgt_key_padding_mask=padding_mask,
            memory_key_padding_mask=padding_mask,
        )

        return self.fc_out(output)

# 4. Training and evaluation helpers
def train_one_epoch(model, dataloader, criterion, optimizer, device):
    """Run one optimization pass over ``dataloader``; return the mean batch loss."""
    model.train()
    running_loss = 0.0

    for batch in tqdm(dataloader, desc="训练中"):
        inputs = batch["input"].to(device)
        targets = batch["target"].to(device)

        optimizer.zero_grad()
        logits = model(inputs)  # (batch_size, seq_len, vocab_size)

        # Flatten (batch, seq) so the criterion sees one prediction per row;
        # PAD targets are skipped via the criterion's ignore_index.
        loss = criterion(logits.reshape(-1, logits.size(-1)), targets.reshape(-1))

        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)  # stabilize updates
        optimizer.step()

        running_loss += loss.item()

    return running_loss / len(dataloader)

def evaluate(model, dataloader, criterion, device):
    """Compute the mean loss over ``dataloader`` without updating weights."""
    model.eval()
    running_loss = 0.0

    with torch.no_grad():
        for batch in tqdm(dataloader, desc="评估中"):
            inputs = batch["input"].to(device)
            targets = batch["target"].to(device)

            logits = model(inputs)
            loss = criterion(logits.reshape(-1, logits.size(-1)), targets.reshape(-1))
            running_loss += loss.item()

    return running_loss / len(dataloader)

# 5. Autoregressive response generation
def generate_response(model, vocab, tokenizer, input_text, max_len=50, temperature=0.7):
    """Generate a reply by sampling tokens autoregressively.

    Args:
        model: Trained language model mapping (batch, seq) ids to logits.
        vocab: Vocabulary supporting ``__getitem__``/``__contains__``/``get_itos``.
        tokenizer: Callable splitting a string into tokens.
        input_text: The user's utterance.
        max_len: Hard cap on the total sequence length.
        temperature: Softmax temperature; lower values sharpen sampling.

    Returns:
        The generated text with all special tokens stripped (note: the
        prompt's own tokens are included, matching the original behavior).
    """
    model.eval()
    pad_idx = vocab["<PAD>"]
    sos_idx = vocab["<SOS>"]
    eos_idx = vocab["<EOS>"]
    sep_idx = vocab["<SEP>"]

    # Run on whatever device the model lives on (CPU fallback for
    # parameter-less models) instead of relying on a module-level global.
    try:
        device = next(model.parameters()).device
    except StopIteration:
        device = torch.device("cpu")

    # Encode the prompt: <SOS> + token ids (<UNK> for OOV words).
    tokens = tokenizer(input_text.lower())
    input_ids = [vocab[token] if token in vocab else vocab["<UNK>"] for token in tokens]
    input_ids = [sos_idx] + input_ids

    generated = torch.tensor([input_ids], dtype=torch.long, device=device)

    with torch.no_grad():
        for _ in range(max_len - generated.size(1)):
            # Stop as soon as the model has emitted <EOS>.
            if generated[0, -1].item() == eos_idx:
                break

            # Sample the next token from the temperature-scaled distribution.
            logits = model(generated)
            next_token_logits = logits[0, -1, :] / temperature
            probs = torch.softmax(next_token_logits, dim=-1)
            next_token = torch.multinomial(probs, num_samples=1).item()

            generated = torch.cat(
                [generated, torch.tensor([[next_token]], dtype=torch.long, device=device)],
                dim=1,
            )

    # Decode ids back to text, dropping special tokens. squeeze(0) (not
    # squeeze()) so a length-1 sequence does not collapse to a scalar.
    inv_vocab = vocab.get_itos()
    response_tokens = [
        inv_vocab[token]
        for token in generated.squeeze(0).tolist()
        if token not in (pad_idx, sos_idx, eos_idx, sep_idx)
    ]
    return " ".join(response_tokens)

# 6. Main entry point (full training pipeline)
def main():
    """Download the corpus, train the dialogue model, then chat interactively."""
    # Hyperparameters
    batch_size = 64
    max_len = 50
    d_model = 256
    nhead = 8
    num_layers = 4
    dim_feedforward = 512
    dropout = 0.1
    lr = 5e-4
    num_epochs = 20
    model_save_path = "movie_dialog_model.pth"
    data_dir = "cornell_movie_dialogs"
    
    # Download and load the dataset (standard PyTorch pipeline).
    print("准备数据集...")
    extract_dir = download_cornell_movie_dialogs(data_dir)
    conversations = load_cornell_conversations(extract_dir)
    if not conversations:
        print("数据集加载失败")
        return
    
    # Subsample conversations to keep training time manageable.
    sample_size = min(50000, len(conversations))
    conversations = random.sample(conversations, sample_size)
    
    # torchtext's basic English tokenizer.
    tokenizer = get_tokenizer("basic_english")
    
    # Build the vocabulary.
    print("构建词汇表...")
    vocab = build_vocabulary(conversations, tokenizer)
    
    # 90/10 train/validation split.
    train_size = int(0.9 * len(conversations))
    train_convs = conversations[:train_size]
    val_convs = conversations[train_size:]
    
    # Datasets and data loaders.
    train_dataset = CornellMovieDialogDataset(train_convs, vocab, tokenizer, max_len)
    val_dataset = CornellMovieDialogDataset(val_convs, vocab, tokenizer, max_len)
    
    train_dataloader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=4,
        pin_memory=True if device.type == "cuda" else False
    )
    val_dataloader = DataLoader(
        val_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=4,
        pin_memory=True if device.type == "cuda" else False
    )
    
    # Model initialization.
    print("初始化模型...")
    model = DecoderOnlyDialogModel(
        vocab_size=len(vocab),
        pad_idx=vocab["<PAD>"],  # PAD index, used for mask construction
        d_model=d_model,
        nhead=nhead,
        num_layers=num_layers,
        dim_feedforward=dim_feedforward,
        max_len=max_len,
        dropout=dropout
    ).to(device)
    
    # Resume from an existing checkpoint, if one is present.
    if os.path.exists(model_save_path):
        model.load_state_dict(torch.load(model_save_path, map_location=device))
        print(f"加载已有模型: {model_save_path}")
    
    # Loss (PAD positions ignored) and optimizer.
    criterion = nn.CrossEntropyLoss(ignore_index=vocab["<PAD>"])
    optimizer = optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.98), eps=1e-9)
    
    # Training loop.
    print("开始训练...")
    best_val_loss = float("inf")
    
    for epoch in range(num_epochs):
        print(f"\nEpoch {epoch+1}/{num_epochs}")
        train_loss = train_one_epoch(model, train_dataloader, criterion, optimizer, device)
        val_loss = evaluate(model, val_dataloader, criterion, device)
        
        print(f"训练损失: {train_loss:.4f} | 验证损失: {val_loss:.4f}")
        
        # Keep only the best checkpoint by validation loss.
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            torch.save(model.state_dict(), model_save_path)
            print(f"保存最佳模型 (验证损失: {best_val_loss:.4f})")
    
    # Interactive chat loop.
    print("\n训练完成！开始对话（输入'quit'结束）")
    while True:
        user_input = input("You: ")
        if user_input.lower() == "quit":
            print("Bot: Goodbye!")
            break
        
        response = generate_response(model, vocab, tokenizer, user_input, max_len)
        print(f"Bot: {response}")

# Run the full pipeline only when executed as a script.
if __name__ == "__main__":
    main()
    