from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
from torch.utils.data import Dataset, DataLoader
import torch
from torch.optim import Adam
from torch.nn import CrossEntropyLoss

# Load the model and tokenizer from a local checkpoint directory.
model_name = "./deepseek/"  # e.g. a distilled 7B model saved on disk
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Tiny toy chat dataset: prompts and the replies we want the model to learn.
# (Strings are Chinese small talk; they are training data, not comments.)
input_sentence = ["你好啊", "你多大了", "我想去吃午饭",'你给我讲个故事']
output_sentence = ["我很好", "我今年18岁了", "正好，我们一起去",'从前有座山，山上有座庙，庙里有两个和尚']

class CustomDataset(Dataset):
    """Minimal map-style dataset over two parallel string lists.

    Item *i* is the tuple ``(texts[i], labels[i])`` — prompt and target reply.
    """

    def __init__(self, texts, labels):
        # Keep the parallel lists as-is; pairing happens at lookup time.
        self.texts = texts
        self.labels = labels

    def __len__(self):
        """Number of (text, label) pairs."""
        return len(self.texts)

    def __getitem__(self, idx):
        """Return the (text, label) pair at position *idx*."""
        pair = (self.texts[idx], self.labels[idx])
        return pair

# Wrap the toy pairs in a Dataset and serve them in shuffled batches of 2.
dataset = CustomDataset(input_sentence, output_sentence)
dataloader = DataLoader(dataset, batch_size=2, shuffle=True)

def process_data(tokenizer, texts, labels):
    """Tokenize prompt and reply batches into fixed-length PyTorch tensors.

    Both batches are padded/truncated to exactly 512 tokens and returned as
    an ``(inputs, outputs)`` pair of tokenizer encodings.
    """
    def _encode(batch):
        # Identical settings for prompts and replies.
        return tokenizer(
            batch,
            padding="max_length",
            truncation=True,
            max_length=512,
            return_tensors="pt",
        )

    return _encode(texts), _encode(labels)

# Use the GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
model.to(device)

# Plain Adam with a small learning rate; the loss is computed manually in
# the loop below instead of passing `labels=` to the model.
optimizer = Adam(model.parameters(), lr=1e-5)
criterion = CrossEntropyLoss()

# Fine-tune for 3 epochs on the toy pairs.
for epoch in range(3):
    model.train()
    total_loss = 0
    for texts, labels in dataloader:
        inputs, labels = process_data(tokenizer, texts, labels)
        inputs = {k: v.to(device) for k, v in inputs.items()}
        labels = labels.to(device)

        outputs = model(**inputs)

        # FIX: mask padding positions out of the loss. Both batches are padded
        # to max_length=512, so without this the pad tokens dominate the loss.
        # CrossEntropyLoss ignores targets equal to -100 by default.
        target_ids = labels.input_ids.masked_fill(labels.attention_mask == 0, -100)

        # NOTE(review): logits come from the *prompt* while targets are the
        # *reply*, with no causal shift — this looks like a seq2seq objective
        # applied to a causal LM; confirm this is the intended training setup.
        loss = criterion(
            outputs.logits.view(-1, outputs.logits.size(2)),
            target_ids.view(-1),
        )

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_loss += loss.item()

    print(f"Epoch {epoch + 1}, Loss: {total_loss}")

# Switch to inference mode for the demo UI.
model.eval()

def question_answering(content):
    """Generate a model continuation for *content* and return the decoded text.

    Uses `generate` defaults with at most 100 newly generated tokens.
    """
    # FIX: move the tokenized tensors to the same device as the model —
    # without this, generation crashes whenever the model lives on CUDA.
    inputs = tokenizer(content, return_tensors="pt").to(device)

    # No gradients are needed at inference time; saves memory and time.
    with torch.no_grad():
        output = model.generate(**inputs, max_new_tokens=100)

    # NOTE(review): special tokens are kept in the decoded string
    # (skip_special_tokens=False) — confirm that is wanted for the UI.
    return tokenizer.decode(output[0], skip_special_tokens=False)

# Build a simple Gradio demo: one free-form text box in, generated text out.
interface = gr.Interface(
    fn=question_answering,
    inputs=["text"],  # a single text prompt (not separate context/question)
    outputs="text",  # the generated answer
)

interface.launch()