File size: 4,556 Bytes
683a41b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
# Instalação das dependências necessárias
!pip install transformers torch gradio datasets

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextDataset, DataCollatorForLanguageModeling, Trainer, TrainingArguments
import gradio as gr
import json
import os

class GameAIAssistant:
    """Chat assistant for game development backed by a causal language model.

    Keeps a rolling conversation memory, a persistent plain-text knowledge
    base on disk, and supports fine-tuning the model on user-supplied text.
    """

    def __init__(self, model_name="deepseek-ai/DeepSeek-R1"):
        # NOTE(review): DeepSeek-R1 is extremely large; loading it like this
        # requires substantial GPU memory — confirm the target environment.
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForCausalLM.from_pretrained(model_name)
        self.memory = []          # alternating user/assistant message dicts
        self.max_memory = 10      # cap on stored messages (user + assistant)
        self.game_knowledge_file = "game_knowledge.txt"

        # Load any previously saved game knowledge from disk.
        self.load_game_knowledge()

    def load_game_knowledge(self):
        """Load the knowledge base from disk into ``self.game_knowledge``.

        Falls back to an empty string when the file does not exist yet.
        """
        if os.path.exists(self.game_knowledge_file):
            with open(self.game_knowledge_file, 'r', encoding='utf-8') as f:
                self.game_knowledge = f.read()
        else:
            self.game_knowledge = ""

    def save_game_knowledge(self, new_knowledge):
        """Append ``new_knowledge`` to the knowledge file and the in-memory copy."""
        with open(self.game_knowledge_file, 'a', encoding='utf-8') as f:
            f.write(new_knowledge + "\n")
        self.game_knowledge += new_knowledge + "\n"

    def generate_response(self, user_input):
        """Generate a reply to ``user_input`` using knowledge + recent memory.

        Returns the model text after the final "Assistente:" marker and
        records both sides of the exchange in ``self.memory``.
        """
        # Combine game knowledge, recent history, and the user's message
        # into a single prompt (prompt labels are intentionally Portuguese).
        context = f"""Conhecimento do Jogo:
{self.game_knowledge}

Histórico de Conversas:
{' '.join([f'{m["role"]}: {m["content"]}' for m in self.memory[-5:]])}

Usuário: {user_input}
Assistente:"""

        # Generate the response. do_sample=True is required for temperature
        # and top_p to take effect (otherwise decoding is greedy and the
        # sampling parameters are silently ignored). max_new_tokens bounds
        # the generated text itself rather than prompt + output.
        inputs = self.tokenizer(context, return_tensors="pt", max_length=1024, truncation=True)
        with torch.no_grad():
            outputs = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                max_new_tokens=1024,
                do_sample=True,
                temperature=0.7,
                top_p=0.9,
                pad_token_id=self.tokenizer.eos_token_id,
            )

        response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        # The decoded text contains the whole prompt; keep only the answer.
        response = response.split("Assistente:")[-1].strip()

        # Update conversation memory with both sides of the exchange.
        self.memory.append({"role": "user", "content": user_input})
        self.memory.append({"role": "assistant", "content": response})

        # Keep only the most recent N messages.
        if len(self.memory) > self.max_memory:
            self.memory = self.memory[-self.max_memory:]

        return response

    def train_on_new_data(self, training_text):
        """Fine-tune the model on ``training_text`` and persist it as knowledge.

        Returns a (Portuguese) status message for the UI.
        """
        # Persist the raw training text so TextDataset can read it back.
        with open("train_data.txt", "w", encoding="utf-8") as f:
            f.write(training_text)

        # NOTE(review): TextDataset is deprecated in recent transformers
        # releases — consider migrating to the `datasets` library.
        dataset = TextDataset(
            tokenizer=self.tokenizer,
            file_path="train_data.txt",
            block_size=128
        )

        # mlm=False → causal (next-token) language modeling objective.
        data_collator = DataCollatorForLanguageModeling(
            tokenizer=self.tokenizer,
            mlm=False
        )

        # Training configuration.
        training_args = TrainingArguments(
            output_dir="./game_ai_model",
            overwrite_output_dir=True,
            num_train_epochs=3,
            per_device_train_batch_size=4,
            save_steps=10_000,
            save_total_limit=2,
        )

        # Run the fine-tuning pass.
        trainer = Trainer(
            model=self.model,
            args=training_args,
            data_collator=data_collator,
            train_dataset=dataset,
        )

        trainer.train()

        # Also store the text as persistent game knowledge.
        self.save_game_knowledge(training_text)

        return "Treinamento concluído e conhecimento salvo!"

# Create the assistant (loads the model, so this is the slow step).
assistant = GameAIAssistant()


def _handle_chat(message):
    """Forward a user message to the assistant and return its reply."""
    return assistant.generate_response(message)


def _handle_training(text):
    """Fine-tune the assistant on the pasted text; returns a status string."""
    return assistant.train_on_new_data(text)


# Assemble the Gradio UI: chat on the left, training on the right.
with gr.Blocks() as interface:
    gr.Markdown("# Assistente de IA para Desenvolvimento de Jogos")

    with gr.Row():
        with gr.Column():
            chatbot = gr.Textbox(label="Chat")
            msg = gr.Textbox(label="Sua mensagem")
            send = gr.Button("Enviar")

        with gr.Column():
            training_data = gr.Textbox(label="Dados de Treinamento", lines=10)
            train = gr.Button("Treinar IA")

    # Wire the buttons to their callbacks; both write into the chat box.
    send.click(_handle_chat, inputs=msg, outputs=chatbot)
    train.click(_handle_training, inputs=training_data, outputs=chatbot)

# Start the app with a public share link.
interface.launch(share=True)