import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torchcrf import CRF
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
from torch.nn.utils.rnn import pad_sequence
import argparse
import os
import pickle  # 新增导入

# 1. Dynamic padding strategy (pad per batch in collate_fn, not per dataset)
class DynamicNERDataset(Dataset):
    """Character-level tagging dataset with per-batch dynamic padding.

    Sentences and label sequences are converted to id lists up front;
    padding to a common length happens per batch in ``collate_fn``.
    """

    def __init__(self, sentences, labels, char2idx, tag2idx):
        # Unknown characters fall back to index 1 (<UNK>); tags must be known.
        self.sentences = [[char2idx.get(ch, 1) for ch in sent] for sent in sentences]
        self.labels = [[tag2idx[tag] for tag in tags] for tags in labels]

    def __len__(self):
        return len(self.sentences)

    def __getitem__(self, idx):
        sent = torch.LongTensor(self.sentences[idx])
        tags = torch.LongTensor(self.labels[idx])
        return sent, tags

    @staticmethod
    def collate_fn(batch):
        """Pad a list of (sentence, tags) pairs to the batch max length with 0."""
        sents, tags = zip(*batch)
        padded_x = pad_sequence(sents, batch_first=True, padding_value=0)
        padded_y = pad_sequence(tags, batch_first=True, padding_value=0)
        return padded_x, padded_y

# 2. Regularization via Dropout on the LSTM outputs
class BiLSTM_CRF(nn.Module):
    """BiLSTM encoder + linear emission layer + CRF decoder for sequence tagging.

    Fix: the CRF is now given a padding mask. Previously loss and decoding ran
    over padded positions as well; since the label pad value (0) is also tag
    "B"'s id, padding silently contributed "B" transitions to the loss. The
    mask is derived from the *input* ids, whose pad index is 0 (<PAD>) while
    every real character maps to an id >= 1.
    """

    def __init__(self, vocab_size, embed_dim, hidden_dim, tag_size, dropout=0.5):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        # hidden_dim // 2 per direction so the concatenated output is hidden_dim.
        self.lstm = nn.LSTM(
            embed_dim, hidden_dim // 2,
            bidirectional=True, batch_first=True
        )
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(hidden_dim, tag_size)
        self.crf = CRF(tag_size, batch_first=True)

    def forward(self, x, tags=None):
        """Return the NLL loss when `tags` is given, else decoded tag-id lists.

        x: LongTensor (batch, seq_len) of character ids, padded with 0.
        tags: optional LongTensor (batch, seq_len) of gold tag ids.
        """
        # True for real tokens, False for padding. Sequences are left-aligned
        # and non-empty, so the first timestep is always unmasked as torchcrf
        # requires.
        mask = x.ne(0)
        emb = self.embedding(x)
        lstm_out, _ = self.lstm(emb)
        lstm_out = self.dropout(lstm_out)
        emissions = self.fc(lstm_out)

        if tags is not None:
            # CRF returns log-likelihood; negate for a minimizable loss.
            return -self.crf(emissions, tags, mask=mask)
        # Decoded sequences have each sample's true (unpadded) length.
        return self.crf.decode(emissions, mask=mask)

# 3. Early stopping
class EarlyStopper:
    """Signal a stop once validation loss fails to improve `patience` times in a row."""

    def __init__(self, patience=3):
        self.patience = patience
        self.counter = 0            # consecutive non-improving checks
        self.min_loss = float('inf')  # best loss seen so far

    def check(self, val_loss):
        """Record `val_loss`; return True when training should stop."""
        improved = val_loss < self.min_loss
        if improved:
            self.min_loss = val_loss
            self.counter = 0
            return False
        self.counter += 1
        return self.counter >= self.patience

# 4. Learning-rate scheduler configuration
def create_optimizer(model, lr=0.001):
    """Build an Adam optimizer plus a plateau scheduler for `model`.

    The scheduler halves the learning rate after 2 epochs without
    improvement in the monitored (minimized) metric.
    """
    opt = torch.optim.Adam(model.parameters(), lr=lr)
    sched = torch.optim.lr_scheduler.ReduceLROnPlateau(
        opt,
        mode='min',
        factor=0.5,
        patience=2,
    )
    return opt, sched

# 5. Additional evaluation metrics
def calculate_metrics(model, loader, device, idx2tag):
    """Print a per-tag classification report over `loader` and return it as a dict.

    Fix: sequence lengths are now derived from the *input* tensor, whose pad
    index is 0 (<PAD>) while real characters map to ids >= 1. The old code
    counted non-zero entries of the *label* row — but tag "B" also has id 0,
    so any sentence containing a "B" tag was under-counted, dropping tokens
    and misaligning y_true/y_pred.
    """
    model.eval()
    y_true, y_pred = [], []
    with torch.no_grad():
        for x, y in loader:
            x, y = x.to(device), y.to(device)
            preds = model(x)
            # Actual (unpadded) length of each sample, from the input padding.
            lengths = (x != 0).sum(dim=1).cpu().numpy()
            for true_seq, pred_seq, seq_len in zip(y.cpu().numpy(), preds, lengths):
                y_true.extend(idx2tag[t] for t in true_seq[:seq_len])
                y_pred.extend(idx2tag[p] for p in pred_seq[:seq_len])
    print(classification_report(y_true, y_pred, zero_division=0))
    return classification_report(y_true, y_pred, output_dict=True)

# Data preprocessing
def load_data(file_paths):
    """Load CoNLL-style "<char> <tag>" files into parallel sentence/label lists.

    Blank lines separate sentences; malformed lines (not exactly two fields)
    are skipped. Returns (sentences, labels) as lists of string lists.

    Fix: a sentence left pending at end-of-file is now flushed — previously a
    file that did not end with a blank line silently dropped its last sentence.
    """
    sentences, labels = [], []
    current_sentence, current_labels = [], []

    for file_path in file_paths:
        # NOTE(review): assumes every corpus file is GBK-encoded — confirm
        # before adding new data sources.
        with open(file_path, 'r', encoding='gbk') as f:
            for line in f:
                line = line.strip()
                if not line:  # blank line terminates the current sentence
                    if current_sentence:
                        sentences.append(current_sentence)
                        labels.append(current_labels)
                        current_sentence, current_labels = [], []
                    continue

                parts = line.split()
                if len(parts) == 2:
                    char, tag = parts[0], parts[1]
                    current_sentence.append(char)
                    current_labels.append(tag)
        # Flush a trailing sentence when the file has no final blank line.
        if current_sentence:
            sentences.append(current_sentence)
            labels.append(current_labels)
            current_sentence, current_labels = [], []

    return sentences, labels

# Training function
def train(model, train_loader, optimizer, device):
    """Run one optimization epoch over `train_loader`; return the mean batch loss."""
    model.train()
    running = 0.0

    for x, y in train_loader:
        x = x.to(device)
        y = y.to(device)

        optimizer.zero_grad()
        batch_loss = model(x, y)  # model returns the loss when tags are given
        batch_loss.backward()
        optimizer.step()

        running += batch_loss.item()

    return running / len(train_loader)

# Validation function
def evaluate(model, val_loader, device):
    """Compute the mean loss over `val_loader` with gradients disabled."""
    model.eval()
    running = 0.0

    with torch.no_grad():
        for x, y in val_loader:
            x, y = x.to(device), y.to(device)
            running += model(x, y).item()

    return running / len(val_loader)

def predict(model, text, char2idx, idx2tag, device):
    """Tag each character of `text`; return a list of (char, tag) pairs.

    Unknown characters map to index 1 (<UNK>) before being fed to the model.
    """
    model.eval()
    chars = list(text)
    ids = torch.LongTensor([[char2idx.get(ch, 1) for ch in chars]]).to(device)

    with torch.no_grad():
        tag_ids = model(ids)[0]  # decode() output for the single batch item

    pairs = []
    for ch, tid in zip(chars, tag_ids[:len(chars)]):
        pairs.append((ch, idx2tag[tid]))
    return pairs

# Interactive loop
def interactive_predict(model, char2idx, idx2tag, device):
    """Console REPL: read a sentence, print per-character tags; 'exit'/'quit' leaves."""
    print("\n进入交互模式（输入 'exit' 退出）")
    while True:
        text = input("请输入要分析的中文句子：").strip()
        if text.lower() in ['exit', 'quit']:
            print("退出交互模式")
            return
        if not text:
            print("输入不能为空！")
            continue

        try:
            tagged = predict(model, text, char2idx, idx2tag, device)
            print("词位标注结果：")
            print(" | ".join(f"{char}/{tag}" for char, tag in tagged))
            print("-"*50)
        except Exception as exc:
            print(f"分析出错: {str(exc)}")

if __name__ == "__main__":
    # Command-line configuration
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--embed_dim', type=int, default=128)
    parser.add_argument('--hidden_dim', type=int, default=256)
    parser.add_argument('--dropout', type=float, default=0.5)
    parser.add_argument('--patience', type=int, default=3)
    parser.add_argument('--model_path', type=str, default='bilstm_crf_model.pth')  # checkpoint location
    parser.add_argument('--dict_path', type=str, default='model_dicts.pkl')  # vocab/tag dictionary location
    parser.add_argument('--train', action='store_true', help='Force retraining')
    args = parser.parse_args()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    
    # Inference-only path: reuse an existing checkpoint unless --train was given.
    if not args.train and os.path.exists(args.model_path):
        print("Loading pre-trained model...")
        # NOTE(review): torch.load and pickle.load can execute arbitrary code
        # from malicious files — only load checkpoints produced by this script.
        checkpoint = torch.load(args.model_path, map_location=device)
        with open(args.dict_path, 'rb') as f:
            char2idx, tag2idx, idx2tag = pickle.load(f)
        
        # Rebuild the architecture from the hyperparameters stored at save
        # time (vars(args) was saved under the 'args' key below).
        model = BiLSTM_CRF(
            len(char2idx),
            checkpoint['args']['embed_dim'],
            checkpoint['args']['hidden_dim'],
            len(tag2idx),
            dropout=checkpoint['args']['dropout']
        ).to(device)
        model.load_state_dict(checkpoint['model_state_dict'])
        
        # Go straight to the interactive REPL, then terminate.
        interactive_predict(model, char2idx, idx2tag, device)
        exit()
    
    # Training path
    else:
        # Load the corpus and build the character vocabulary.
        file_paths = ["pku_training.txt"]
        sentences, labels = load_data(file_paths)
        
        char2idx = {"<PAD>": 0, "<UNK>": 1}
        for s in sentences:
            for c in s:
                if c not in char2idx:
                    char2idx[c] = len(char2idx)
                    
        # BMES word-segmentation tag set.
        # NOTE(review): tag "B" shares id 0 with the label padding value used
        # in collate_fn, so padded label positions look like "B" — verify the
        # loss/metrics mask padding by input ids rather than label ids.
        tag2idx = {"B": 0, "M": 1, "E": 2, "S": 3}
        idx2tag = {v: k for k, v in tag2idx.items()}
        
        # 80/20 train/test split with a fixed seed for reproducibility.
        train_sentences, test_sentences, train_labels, test_labels = train_test_split(
            sentences, labels, test_size=0.2, random_state=42
        )
        
        # Datasets with per-batch dynamic padding.
        train_dataset = DynamicNERDataset(train_sentences, train_labels, char2idx, tag2idx)
        test_dataset = DynamicNERDataset(test_sentences, test_labels, char2idx, tag2idx)
        
        # Data loaders
        train_loader = DataLoader(
            train_dataset, 
            batch_size=args.batch_size, 
            shuffle=True,
            collate_fn=DynamicNERDataset.collate_fn
        )
        test_loader = DataLoader(
            test_dataset,
            batch_size=args.batch_size,
            collate_fn=DynamicNERDataset.collate_fn
        )
        
        # Model
        model = BiLSTM_CRF(
            len(char2idx), 
            args.embed_dim, 
            args.hidden_dim, 
            len(tag2idx),
            dropout=args.dropout
        ).to(device)
        
        # Optimizer and LR scheduler
        optimizer, scheduler = create_optimizer(model)
        
        # Per-epoch loss history for the plot below.
        train_losses = []
        test_losses = []

        # Training loop with early stopping on the test loss.
        early_stopper = EarlyStopper(patience=args.patience)
        num_epochs = 50
        for epoch in range(num_epochs):
            train_loss = train(model, train_loader, optimizer, device)
            test_loss = evaluate(model, test_loader, device)
            scheduler.step(test_loss)  # reduce LR when the test loss plateaus
            
            # Record losses for plotting.
            train_losses.append(train_loss)
            test_losses.append(test_loss)
            
            print(f"Epoch {epoch+1:02d} | Train Loss: {train_loss:.4f} | Test Loss: {test_loss:.4f}")
            
            if early_stopper.check(test_loss):
                print(f"Early stopping at epoch {epoch+1}")
                break
        
        # Final evaluation on the held-out split.
        print("Test Set Metrics:")
        test_metrics = calculate_metrics(model, test_loader, device, idx2tag)
        
        # Persist weights + hyperparameters, and the dictionaries separately
        # (the load path above reads both files).
        print("Saving model and dictionaries...")
        torch.save({
            'model_state_dict': model.state_dict(),
            'char2idx': char2idx,
            'tag2idx': tag2idx,
            'args': vars(args)
        }, args.model_path)

        with open(args.dict_path, 'wb') as f:
            pickle.dump((char2idx, tag2idx, idx2tag), f)
        
        # Plot loss curves; `epoch + 2` accounts for 1-based epochs after a
        # possible early-stopping break.
        plt.figure(figsize=(12, 6))
        plt.plot(range(1, epoch + 2), train_losses, label='Train Loss')
        plt.plot(range(1, epoch + 2), test_losses, label='Test Loss')
        plt.xlabel('Epochs')
        plt.ylabel('Loss')
        plt.title('Training and Testing Loss Over Epochs')
        plt.legend()
        plt.show()

        # Hand the freshly trained model to the interactive REPL.
        interactive_predict(model, char2idx, idx2tag, device)