import json
import os
import shutil

import numpy as np
from datasets import load_dataset

import mlx.core as mx
import mlx.nn as nn
import mlx.optimizers as optim
from mlx.utils import tree_flatten
from mlx_lm import load, generate

# 1. Load the base model and tokenizer via mlx_lm.
#    NOTE(review): hard-coded absolute path — assumes Qwen2.5-0.5B weights
#    exist at this location on this machine; adjust before running elsewhere.
model, tokenizer = load("/Users/deep/Project/models/Qwen/qwen2.5-0.5B")
tokenizer.pad_token = tokenizer.eos_token  # use EOS as the padding token so batch padding works

# 2. Load the CSV training data sitting next to this script.
dataset_path = os.path.join(os.path.dirname(__file__), "manufacturing_data.csv")
dataset = load_dataset("csv", data_files=dataset_path)

# 3. Data preprocessing
def preprocess_function(examples):
    """Turn a batch of input/output pairs into fixed-length token arrays.

    Each pair is rendered as a Qwen chat-template conversation
    (system / user / assistant turns), then tokenized with padding and
    truncation to 1024 tokens. Returns ``input_ids`` and
    ``attention_mask`` as numpy arrays, suitable for
    ``datasets.map(batched=True)``.
    """
    conversations = []
    for question, answer in zip(examples["input"], examples["output"]):
        conversations.append(
            f"<|im_start|>system\n你是一个精密制造排程专家<|im_end|>\n"
            f"<|im_start|>user\n{question}<|im_end|>\n"
            f"<|im_start|>assistant\n{answer}<|im_end|>"
        )

    # mlx_lm's TokenizerWrapper is not directly callable for batch encoding;
    # go through the underlying HF tokenizer instead.
    encoded = tokenizer._tokenizer(
        conversations,
        max_length=1024,
        padding="max_length",
        truncation=True,
        return_tensors="np",
    )
    return {
        "input_ids": encoded["input_ids"],
        "attention_mask": encoded["attention_mask"],
    }

# 4. Tokenize the whole dataset in batches of 10; the original text columns
#    are dropped so only input_ids/attention_mask remain.
tokenized_dataset = dataset.map(
    preprocess_function,
    batched=True,
    batch_size=10,
    remove_columns=dataset["train"].column_names
)

# 5. Custom batching data loader
class ManufacturingDataLoader:
    """Yield (input_ids, attention_mask) MLX-array batches from a tokenized dataset.

    Fix over the original: with ``shuffle=True`` the indices were shuffled
    only once at construction time, so every epoch iterated the data in the
    same order. The shuffle now happens at the start of each iteration
    (i.e. each epoch gets a fresh permutation).
    """

    def __init__(self, dataset, batch_size=4, shuffle=True):
        self.dataset = dataset
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.indices = np.arange(len(dataset))

    def __len__(self):
        # Number of batches per epoch, counting a final partial batch.
        return (len(self.dataset) + self.batch_size - 1) // self.batch_size

    def __iter__(self):
        if self.shuffle:
            # Fresh permutation every epoch, not just once at construction.
            np.random.shuffle(self.indices)
        for start_idx in range(0, len(self.dataset), self.batch_size):
            # Plain Python list indexing is the documented form for
            # datasets.Dataset.__getitem__ — safer than a numpy array.
            batch_indices = self.indices[start_idx:start_idx + self.batch_size].tolist()
            batch = self.dataset[batch_indices]

            # Convert to MLX arrays for the training step.
            input_ids = mx.array(batch["input_ids"])
            attention_mask = mx.array(batch["attention_mask"])

            yield input_ids, attention_mask

# 6. Build the training data loader.
train_loader = ManufacturingDataLoader(
    tokenized_dataset["train"],
    batch_size=4,
    shuffle=True
)

# 7. Training hyper-parameters.
learning_rate = 5e-5
optimizer = optim.Adam(learning_rate=learning_rate)
num_epochs = 10

# 8. Loss function (padding-aware)
def loss_fn(model, inputs, attention_mask):
    """Masked causal language-modeling cross-entropy loss.

    Fix over the original: ``attention_mask`` was accepted but never used,
    so the many padding positions produced by ``padding="max_length"``
    (length 1024) were averaged into the loss. Padding tokens are now
    masked out and the loss is normalized by the number of real tokens.

    Args:
        model: mlx-lm causal LM; ``model(inputs)`` returns logits directly.
        inputs: (batch, seq) token ids.
        attention_mask: (batch, seq) 1 for real tokens, 0 for padding.

    Returns:
        Scalar mean cross-entropy over non-padding target positions.
    """
    # Targets are the inputs shifted left by one position.
    targets = inputs[:, 1:]

    # Forward pass — mlx-lm models return logits directly.
    logits = model(inputs)

    # Drop the logit of the final position (it has no next-token target),
    # and align the mask with the target positions.
    shift_logits = logits[:, :-1, :]
    shift_mask = attention_mask[:, 1:]

    # Per-token cross-entropy; masking happens before the average.
    token_losses = nn.losses.cross_entropy(
        shift_logits.reshape(-1, shift_logits.shape[-1]),
        targets.reshape(-1),
        reduction="none"
    )
    mask = shift_mask.reshape(-1)

    # Average over real tokens only; guard against an all-padding batch.
    denom = mx.maximum(mask.sum(), 1)
    loss = (token_losses * mask).sum() / denom

    return loss

# Main training loop.
# Fix over the original: the value_and_grad transform is created once,
# before the loops, instead of being rebuilt on every single batch.
loss_and_grad_fn = nn.value_and_grad(model, loss_fn)

for epoch in range(num_epochs):
    epoch_loss = 0.0
    num_batches = 0

    for inputs, attention_mask in train_loader:
        # Forward + backward pass in one call.
        loss, grads = loss_and_grad_fn(model, inputs, attention_mask)

        # Apply gradients, then force evaluation of the lazy MLX graph.
        optimizer.update(model, grads)
        mx.eval(model.parameters(), optimizer.state)

        epoch_loss += loss.item()
        num_batches += 1

        print(f"Epoch {epoch+1} | Batch {num_batches} | Loss: {loss.item():.4f}")

    # Guard against an empty loader (avoids ZeroDivisionError).
    avg_loss = epoch_loss / max(num_batches, 1)
    print(f"Epoch {epoch+1}/{num_epochs} | Avg Loss: {avg_loss:.4f}")

# 9. Persist the fine-tuned model, tokenizer, and domain metadata.
output_dir = "qwen0.5b_finetuned_manufacturing"

# Create the output directory (no-op if it already exists).
os.makedirs(output_dir, exist_ok=True)

# Save weights and tokenizer files.
model.save_weights(os.path.join(output_dir, "model.npz"))
tokenizer.save_pretrained(output_dir)

# Manufacturing-domain metadata shipped alongside the weights.
metadata = {
    "domain": "精密制造",
    "precision": "秒级",
    "supported_processes": ["车削", "研磨", "抛光", "热处理"],
    "trained_samples": len(tokenized_dataset["train"])
}

# Fix over the original: open with an explicit UTF-8 encoding —
# ensure_ascii=False emits raw Chinese characters, which raises
# UnicodeEncodeError on platforms whose default file encoding is not UTF-8.
with open(os.path.join(output_dir, "manufacturing_metadata.json"), "w", encoding="utf-8") as f:
    json.dump(metadata, f, ensure_ascii=False, indent=2)

print(f"精密制造排程模型已保存到: {output_dir}")

# Smoke-test the fine-tuned model with a sample prompt.
print("\n模型验证示例:")
prompt = "<|im_start|>system\n你是一个精密制造排程专家<|im_end|>\n<|im_start|>user\n激光切割需要3小时，抛光需要2小时，如何安排最优？<|im_end|>\n<|im_start|>assistant"

try:
    # First attempt: generate with the in-memory model.
    response = generate(model, tokenizer, prompt=prompt, max_tokens=100)
    print(f"模型回复: {response}\n")
except Exception as e:
    print(f"模型验证失败: {str(e)}")
    
    # Fallback: reload the just-saved model from disk and retry generation.
    try:
        print("尝试重新加载模型进行验证...")
        reload_model, reload_tokenizer = load(output_dir)
        reload_response = generate(reload_model, reload_tokenizer, prompt=prompt, max_tokens=100)
        print(f"重新加载后模型回复: {reload_response}\n")
    except Exception as e2:
        print(f"重新加载模型验证失败: {str(e2)}")

# Copy the base model's config.json next to the fine-tuned weights so the
# output directory can be reloaded with mlx_lm.load().
# Fix over the original: the mid-script `import shutil` moved to the top of
# the file, and the handler is narrowed to OSError — the file-system errors
# shutil.copy actually raises — instead of swallowing every exception.
print("\n复制原始模型配置文件...")
try:
    shutil.copy("/Users/deep/Project/models/Qwen/qwen2.5-0.5B/config.json", output_dir)
    print(f"配置文件已复制到 {output_dir}")
except OSError as e:
    # Best-effort: report and continue, the weights are already saved.
    print(f"配置文件复制失败: {str(e)}")

# Interactive inference REPL for the manufacturing assistant.
print("\n=== 制造业智能助手 ===")
print("输入您关于生产排程、工艺优化或设备维护的问题（输入 'exit' 退出）")

while True:
    user_input = input("\n用户: ")
    if user_input.lower() == 'exit':
        print("对话已结束")
        break

    # Wrap the question in the same Qwen chat template used for training.
    prompt = f"<|im_start|>system\n你是一个精密制造排程专家<|im_end|>\n<|im_start|>user\n{user_input}<|im_end|>\n<|im_start|>assistant"
    
    try:
        # Debug info: prompt size and a preview of what the model sees.
        print(f"提示长度: {len(prompt)} 字符")
        print(f"模型输入: {prompt[:100]}...")
        
        # Generate a response (default decoding; no sampling parameters passed).
        response = generate(
            model, 
            tokenizer, 
            prompt=prompt, 
            max_tokens=512
        )
        
        # Raw output before any cleanup, for debugging.
        print(f"原始响应: {response}")
        
        # Keep only the text before the first end-of-turn marker.
        cleaned_response = response.split("<|im_end|>")[0].strip()
        if not cleaned_response:
            print("\n助手: 抱歉，我无法生成响应。请尝试更具体的问题。")
        else:
            print(f"\n助手: {cleaned_response}")
    except Exception as e:
        # On any generation failure, print the traceback and exit the REPL.
        import traceback
        print(f"生成响应失败: {str(e)}")
        print(f"错误详情: {traceback.format_exc()}")
        break
