import os
import logging
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
)
from datasets import load_dataset, DatasetDict
from peft import LoraConfig, get_peft_model
import torch
from torch.utils.data import DataLoader
from torch.optim import AdamW
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np

# Logging setup: emit INFO-level records from this module's logger.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# 1. Basic configuration — every path is resolved relative to this script,
# so the script can be launched from any working directory.
current_dir = os.path.dirname(os.path.abspath(__file__))
model_path = os.path.join(current_dir, "models")  # local model checkpoint
train_file = os.path.join(current_dir, "train.json")
output_dir = os.path.join(current_dir, "output")  # ASCII-only output folder
log_file = os.path.join(output_dir, "training_log.txt")  # plain-text run log

# Create the output directory up front so later writes cannot fail on it.
os.makedirs(output_dir, exist_ok=True)

for _msg in (
    f"使用本地模型: {model_path}",
    f"训练文件: {train_file}",
    f"输出目录: {output_dir}",
):
    logger.info(_msg)

# Helper used throughout the script to record progress in two places at once.
def write_log(message):
    """Append *message* as one line to the training log file, then echo it
    through the module logger.

    The file is opened per call so each line is flushed immediately and
    nothing is lost if the process dies mid-training.
    """
    with open(log_file, "a", encoding="utf-8") as handle:
        handle.write(f"{message}\n")
    logger.info(message)

# Record the training configuration in the run log. These values mirror the
# actual hyper-parameters used further down in this script.
write_log("=== 训练配置 ===")
write_log(f"模型路径: {model_path}")
write_log(f"训练文件: {train_file}")
write_log(f"输出目录: {output_dir}")
write_log(f"批次大小: 4")
write_log(f"学习率: 5e-4")
write_log(f"训练轮数: 3")
write_log(f"梯度累积步数: 4")
# BUGFIX: the log previously claimed r=8, alpha=32, dropout=0.1, which
# contradicted the actual LoraConfig below (r=16, alpha=64, dropout=0.05).
write_log(f"LoRA配置: r=16, alpha=64, dropout=0.05")
write_log("=" * 50)

# 2. Load the tokenizer (strictly from local files; no hub download).
logger.info("正在加载分词器...")
try:
    tokenizer = AutoTokenizer.from_pretrained(
        model_path,
        trust_remote_code=True,
        local_files_only=True,
        use_fast=True
    )
    # ROBUSTNESS: many causal-LM tokenizers ship without a pad token, but
    # preprocessing below pads to max_length, which would raise without one.
    # Reusing EOS as the pad token is the standard fallback.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    logger.info("分词器加载成功")
except Exception as e:
    logger.error(f"加载分词器失败: {str(e)}")
    raise

logger.info("正在加载模型...")
try:
    # Load the base model from disk only, sharded automatically across the
    # available devices, in half precision, from safetensors files.
    _load_kwargs = dict(
        trust_remote_code=True,
        local_files_only=True,
        device_map="auto",
        torch_dtype=torch.float16,
        use_safetensors=True,
    )
    model = AutoModelForCausalLM.from_pretrained(model_path, **_load_kwargs)
    logger.info("模型加载成功")
except Exception as e:
    logger.error(f"加载模型失败: {str(e)}")
    raise

# 3. LoRA configuration — wraps the base model with low-rank adapters.
logger.info("配置 LoRA...")
lora_config = LoraConfig(
    r=16,  # rank of the low-rank update matrices (adapter capacity)
    lora_alpha=64,  # scaling factor applied to the adapter output
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",  # attention projections
        "ffn", "mlp", "output",  # feed-forward / output blocks — NOTE(review): module names are architecture-specific; confirm these exist in this model, otherwise peft raises at wrap time
        "input_layernorm", "post_attention_layernorm",  # NOTE(review): targeting LayerNorm modules with LoRA is unusual — confirm this is intended
    ],
    lora_dropout=0.05,  # dropout on the adapter path for regularization
    bias="all",  # also train every bias parameter of the base model
    task_type="CAUSAL_LM",  # autoregressive language-modeling task
)
# Replace `model` with the PEFT-wrapped version; only adapter (and bias)
# parameters remain trainable.
model = get_peft_model(model, lora_config)

# 4. Load the JSON training file and carve out a 5% validation split.
logger.info("正在加载数据集...")
raw_ds = load_dataset("json", data_files={"train": train_file})
# Fixed seed keeps the train/validation partition reproducible across runs.
split = raw_ds["train"].train_test_split(test_size=0.05, seed=42)
dataset = DatasetDict(train=split["train"], validation=split["test"])

# 5. Data preprocessing
logger.info("正在预处理数据...")
def preprocess(examples):
    """Tokenize batched examples into fixed-length causal-LM features.

    Each example's prompt (``content``) and completion (``output``) are both
    terminated with EOS and concatenated, then tokenized to exactly 512
    tokens (truncated/padded).

    Args:
        examples: batched dataset columns; must contain ``content`` and
            ``output`` lists of strings.

    Returns:
        Dict with ``input_ids``, ``attention_mask`` and ``labels``.
    """
    inputs = [c + tokenizer.eos_token for c in examples["content"]]
    targets = [o + tokenizer.eos_token for o in examples["output"]]
    full = [inp + tgt for inp, tgt in zip(inputs, targets)]
    tokenized = tokenizer(
        full,
        max_length=512,
        truncation=True,
        padding="max_length"
    )
    # BUGFIX: labels were a plain copy of input_ids, so the loss was also
    # computed on padding tokens. Mask padded positions with -100, which
    # the HF causal-LM cross-entropy loss ignores. The attention mask is
    # used (rather than comparing to pad_token_id) so a genuine trailing
    # EOS is never masked even when pad_token == eos_token.
    tokenized["labels"] = [
        [tok if keep == 1 else -100 for tok, keep in zip(ids, mask)]
        for ids, mask in zip(tokenized["input_ids"], tokenized["attention_mask"])
    ]
    return tokenized

# Tokenize both splits in batches and drop the raw text columns, which are
# no longer needed once features are produced.
_raw_columns = ["id", "content", "output"]
tokenized_ds = dataset.map(preprocess, batched=True, remove_columns=_raw_columns)

# 6. Data loaders
def _collate(features):
    """Stack a list of equal-length feature dicts into batched tensors.

    Every key present in the first feature (input_ids, attention_mask,
    labels) is collected across the batch and converted to one tensor.
    """
    return {
        key: torch.tensor([feat[key] for feat in features])
        for key in features[0].keys()
    }

# CONSISTENCY: both loaders previously duplicated the same collate lambda;
# a single named helper keeps them in sync.
train_dataloader = DataLoader(
    tokenized_ds["train"],
    batch_size=4,
    shuffle=True,  # reshuffle training data each epoch
    collate_fn=_collate,
)

eval_dataloader = DataLoader(
    tokenized_ds["validation"],
    batch_size=4,
    shuffle=False,  # deterministic order for comparable eval losses
    collate_fn=_collate,
)

# 7. Training configuration
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# NOTE(review): the model was loaded with device_map="auto"; calling .to()
# on an accelerate-dispatched model is at best a no-op and may raise on
# newer accelerate versions — confirm whether this line is needed.
model.to(device)
# IMPROVEMENT: optimize only the trainable (LoRA + bias) parameters; the
# base model is frozen by get_peft_model, and handing frozen parameters to
# AdamW wastes optimizer state memory.
optimizer = AdamW(
    (p for p in model.parameters() if p.requires_grad),
    lr=5e-4,
)
num_epochs = 3
gradient_accumulation_steps = 4  # effective batch size: 4 * 4 = 16

# 8. Training loop
logger.info("开始训练...")
write_log("\n=== 开始训练 ===")
model.train()

# Loss history for the plot at the end of the script: train_losses holds the
# running-average loss at every step, eval_losses one entry per validation
# pass (every 100 steps).
train_losses = []
eval_losses = []


def _run_validation():
    """Return the mean loss over the validation set; restores train mode."""
    model.eval()
    loss_sum = 0.0
    with torch.no_grad():
        for eval_batch in eval_dataloader:
            eval_batch = {k: v.to(device) for k, v in eval_batch.items()}
            loss_sum += model(**eval_batch).loss.item()
    model.train()
    return loss_sum / len(eval_dataloader)


for epoch in range(num_epochs):
    total_loss = 0
    progress_bar = tqdm(train_dataloader, desc=f"Epoch {epoch + 1}/{num_epochs}")

    for step, batch in enumerate(progress_bar):
        # Move the batch to the training device.
        batch = {k: v.to(device) for k, v in batch.items()}

        # Forward/backward; the loss is pre-scaled so the accumulated
        # gradient equals the mean over the accumulation window.
        outputs = model(**batch)
        loss = outputs.loss / gradient_accumulation_steps
        loss.backward()

        # Apply accumulated gradients every `gradient_accumulation_steps`
        # micro-batches.
        if (step + 1) % gradient_accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()

        # Undo the scaling when reporting, so the displayed loss is the raw
        # per-batch value averaged over the epoch so far.
        total_loss += loss.item() * gradient_accumulation_steps
        current_loss = total_loss / (step + 1)
        progress_bar.set_postfix({"loss": current_loss})

        # Record the running-average loss at this step (used for plotting).
        train_losses.append(current_loss)

        # Checkpoint and evaluate every 100 steps.
        if (step + 1) % 100 == 0:
            model.save_pretrained(output_dir)
            tokenizer.save_pretrained(output_dir)
            log_message = f"保存检查点 - 轮次: {epoch + 1}, 步数: {step + 1}, 损失: {current_loss:.4f}"
            write_log(log_message)

            eval_loss = _run_validation()
            eval_losses.append(eval_loss)

            log_message = f"验证损失 - 轮次: {epoch + 1}, 步数: {step + 1}, 损失: {eval_loss:.4f}"
            write_log(log_message)

    # BUGFIX: when the epoch length is not a multiple of the accumulation
    # window, the trailing micro-batches accumulated gradients that were
    # never applied and silently leaked into the next epoch. Flush them.
    if len(train_dataloader) % gradient_accumulation_steps != 0:
        optimizer.step()
        optimizer.zero_grad()

    # Log the epoch's mean training loss.
    epoch_avg_loss = total_loss / len(train_dataloader)
    log_message = f"Epoch {epoch + 1} 完成 - 平均损失: {epoch_avg_loss:.4f}"
    write_log(log_message)

# 9. Persist the final adapter weights and the tokenizer side by side, so
# the output directory can be reloaded as a complete checkpoint.
logger.info("保存最终模型...")
for artifact in (model, tokenizer):
    artifact.save_pretrained(output_dir)
write_log("\n=== 训练完成 ===")
write_log(f"最终模型已保存到: {output_dir}")
logger.info(f"训练完成，模型已保存到 {output_dir}")

# 10. Plot the loss curves. Validation losses are sampled far less often
# than training losses, so they are spread evenly over the same x-axis
# range to make the two curves comparable.
fig, ax = plt.subplots(figsize=(10, 6))
ax.plot(train_losses, label='训练损失', color='blue')
eval_x = np.linspace(0, len(train_losses) - 1, len(eval_losses))
ax.plot(eval_x, eval_losses, label='验证损失', color='red')
ax.set_title('训练过程中的损失变化')
ax.set_xlabel('训练步数')
ax.set_ylabel('损失值')
ax.legend()
ax.grid(True)

# Save the figure next to the model artifacts, then free its resources.
loss_plot_path = os.path.join(output_dir, 'loss_plot.png')
fig.savefig(loss_plot_path)
plt.close(fig)

write_log(f"损失曲线图已保存到: {loss_plot_path}")
logger.info(f"损失曲线图已保存到: {loss_plot_path}")
