import pandas as pd
import json
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from sentence_transformers import SentenceTransformer
from sys3_位置编码 import PositionEncoding

# Run on GPU when available; modules and tensors are moved here explicitly below.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


# ========== 1️⃣ 数据集定义：支持多轮对话记忆 ==========
class MultiTurnDataset(Dataset):
    """Turn packed multi-turn conversations into (context, reply) training pairs.

    Each row of the backing table carries a ``messages`` field: either a JSON
    string or an already-parsed list of ``{"role", "content"}`` dicts.  For
    every assistant turn, one sample is produced whose source text is the
    system prompt plus up to ``max_context`` preceding user/assistant
    exchanges, and whose target text is the assistant reply itself.
    """

    def __init__(self, path='data/data.csv', max_rows=None, max_context=5):
        """Load the corpus from *path* (CSV or JSON) and pre-build all samples.

        Args:
            path: corpus file; ``.csv`` is read with pandas, anything else as JSON.
            max_rows: optional cap on the number of conversations loaded.
            max_context: max number of prior exchanges kept as context.
        """
        super().__init__()
        if path.endswith('.csv'):
            # CSV supports native row limiting via nrows.
            self.df = pd.read_csv(path, nrows=max_rows, encoding='utf-8')
        else:
            self.df = pd.read_json(path)
            if max_rows:
                self.df = self.df.iloc[:max_rows]
        self.max_context = max_context
        self.samples = self._build_samples()

    def _build_samples(self):
        """Expand every conversation into a flat list of (source, target) pairs."""
        pairs = []

        for _, row in self.df.iterrows():
            raw = row['messages']
            messages = json.loads(raw) if isinstance(raw, str) else raw

            # Split the system prompt away from the actual dialogue turns.
            system_prompt = ''
            turns = []
            for msg in messages:
                role = msg['role']
                if role == 'system':
                    system_prompt = msg['content']
                elif role in ('user', 'assistant'):
                    turns.append(msg)

            # Emit one sample per assistant reply.
            for idx, turn in enumerate(turns):
                if turn['role'] != 'assistant':
                    continue

                # Context window: at most max_context exchanges (2 turns each).
                window_start = max(0, idx - self.max_context * 2)
                pieces = [system_prompt] if system_prompt else []
                for prev in turns[window_start:idx]:
                    prefix = "用户：" if prev['role'] == 'user' else "心理师："
                    pieces.append(prefix + prev['content'])

                # Source: everything up to the reply; target: the reply itself.
                pairs.append(("\n".join(pieces), turn['content']))

        return pairs

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        return self.samples[idx]


# ========== 2️⃣ Tokenizer包装 ==========
# Frozen Chinese sentence-embedding backbone (bge_small_zh): only its tokenizer
# and word-embedding table are reused downstream.
# NOTE(review): hard-coded absolute Windows path — consider a config/env var.
embedding = SentenceTransformer(r"E:\pythonLearning\shanghai_hqyj_ai\bge_small_zh").eval()
for p in embedding.parameters():
    p.requires_grad = False  # freeze: the backbone is never trained


def tokenizer(texts, max_len=128):
    """Tokenize a batch of strings with the frozen backbone's tokenizer.

    Args:
        texts: string or list of strings to encode.
        max_len: fixed sequence length; shorter inputs are padded, longer truncated.

    Returns:
        A ``(input_ids, pad_mask)`` pair where ``pad_mask`` is a bool tensor
        that is True at PAD positions (the key_padding_mask convention used
        by ``nn.Transformer``).
    """
    encoded = embedding.tokenizer(
        texts,
        add_special_tokens=True,
        max_length=max_len,
        padding='max_length',
        truncation=True,
        return_tensors='pt',
    )
    # Invert HuggingFace's attention_mask (1 = real token) into a boolean
    # "blocked" mask (True = ignore this position).
    pad_mask = encoded['attention_mask'].eq(0)
    return encoded['input_ids'], pad_mask


# ========== 3️⃣ Transformer 模型 ==========
class LanguageModel(nn.Module):
    """Encoder-decoder Transformer on top of the frozen bge word embeddings.

    The token-embedding table is borrowed from the global sentence-transformer
    backbone (frozen at module load time); the Transformer layers and the
    output projection are the trainable parts.  ``forward`` maps (src, tgt)
    token-id batches to per-position vocabulary logits.
    """

    def __init__(self):
        super().__init__()
        # d_model must match the backbone hidden size so the reused
        # embedding table lines up with the Transformer width.
        embed_dim = embedding[0].auto_model.config.hidden_size
        self.nhead = 8
        self.pe = PositionEncoding(embed_dim, device=device)
        # Borrow the backbone's token-embedding table (already frozen).
        self.word_embeddings = embedding._first_module().auto_model.embeddings.word_embeddings.to(device)

        self.transformer = nn.Transformer(
            d_model=embed_dim,
            nhead=self.nhead,
            num_encoder_layers=8,
            num_decoder_layers=8,
            dim_feedforward=2048,
            batch_first=True,
            norm_first=True,
            dropout=0.1,
        )

        # Project decoder states back onto the tokenizer vocabulary.
        self.fc_out = nn.Linear(embed_dim, embedding.tokenizer.vocab_size).to(device)

    def forward(self, src, tgt, src_pad_mask, tgt_pad_mask):
        """Run one encoder-decoder pass.

        Args:
            src: (batch, src_len) source token ids.
            tgt: (batch, tgt_len) decoder-input token ids (teacher forcing).
            src_pad_mask: bool (batch, src_len), True at PAD positions.
            tgt_pad_mask: bool (batch, tgt_len), True at PAD positions.

        Returns:
            (batch, tgt_len, vocab_size) logits.
        """
        src_emb = self.pe(self.word_embeddings(src))
        tgt_emb = self.pe(self.word_embeddings(tgt))

        # Boolean causal mask (True = blocked) so its dtype matches the
        # boolean key-padding masks.
        causal = nn.Transformer.generate_square_subsequent_mask(tgt_emb.size(1), device=device).bool()
        decoded = self.transformer(
            src_emb, tgt_emb,
            tgt_mask=causal,
            src_key_padding_mask=src_pad_mask,
            tgt_key_padding_mask=tgt_pad_mask,
            memory_key_padding_mask=src_pad_mask,
            tgt_is_causal=True,
        )
        return self.fc_out(decoded)


# ========== 4️⃣ 主训练循环 ==========
if __name__ == '__main__':
    import os  # local import: only the training entry point needs it

    Epochs = 50
    batch_size = 5
    lr = 1e-4

    # Small multi-turn corpus: 80 conversations, up to 4 prior exchanges of context.
    ds = MultiTurnDataset('data/PsyDTCorpus_train_mulit_turn_packing.json', max_rows=80, max_context=4)
    dl = DataLoader(ds, batch_size=batch_size, shuffle=True)

    model = LanguageModel().to(device)
    try:
        model.load_state_dict(torch.load('weights/best_model_s.pth', map_location=device))
        print('✅ 加载模型成功')
    except (FileNotFoundError, RuntimeError):
        # Missing checkpoint or architecture mismatch -> train from scratch.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt etc.)
        print('⚠️ 加载模型失败，重新训练')

    # The save calls below assume this directory exists.
    os.makedirs('weights', exist_ok=True)

    # Targets are padded to max_length by the tokenizer; exclude PAD positions
    # from the loss so padding does not dominate training.
    pad_id = embedding.tokenizer.pad_token_id
    loss_fn = nn.CrossEntropyLoss(ignore_index=pad_id if pad_id is not None else -100)
    optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
    best_loss = float('inf')

    for epoch in range(Epochs):
        model.train()
        total_loss = 0

        for i, (src, tgt) in enumerate(dl):
            src_ids, src_mask = tokenizer(src)
            tgt_ids, tgt_mask = tokenizer(tgt)

            src_ids, tgt_ids = src_ids.to(device), tgt_ids.to(device)
            src_mask, tgt_mask = src_mask.to(device), tgt_mask.to(device)

            optimizer.zero_grad()
            # Teacher forcing: decoder sees tgt[:-1], labels are tgt shifted by one.
            output = model(src_ids, tgt_ids[:, :-1], src_mask, tgt_mask[:, :-1])
            loss = loss_fn(
                output.reshape(-1, output.shape[-1]),
                tgt_ids[:, 1:].reshape(-1)
            )
            loss.backward()
            optimizer.step()
            total_loss += loss.item()

            if (i + 1) % 10 == 0:
                print(f"Epoch[{epoch + 1}/{Epochs}] Step[{i + 1}/{len(dl)}] Loss={loss.item():.4f}")

        avg_loss = total_loss / len(dl)
        print(f"Epoch[{epoch + 1}/{Epochs}] 平均Loss={avg_loss:.4f}")

        # Checkpoint the best model by average training loss.
        if avg_loss < best_loss:
            best_loss = avg_loss
            torch.save(model.state_dict(), 'weights/best_model_s.pth')
            print(f"🔥 新最佳模型保存，Loss={best_loss:.4f}")

    # Also keep the final-epoch weights regardless of loss.
    torch.save(model.state_dict(), 'weights/model_last.pth')
    print("✅ 训练结束，模型已保存")
