# Russian-language chatbot: GPT-J generates English replies, MarianMT
# translates user input RU->EN and the model's reply EN->RU.
# (Removed non-code residue from a web-page paste: "Spaces:" / "Runtime error".)
from transformers import GPTJForCausalLM, GPT2Tokenizer, MarianMTModel, MarianTokenizer
import torch

# Load the tokenizer and the GPT-J model used for response generation.
tokenizer = GPT2Tokenizer.from_pretrained("EleutherAI/gpt-j-6B")
model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")

# Select the device once and move the model to it. (The original repeated
# this device/model.to() pair three times; the extra repetitions were no-ops.)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)

# Translation functions (RU<->EN)
def translate_to_english(text):
    """Translate Russian *text* to English with Helsinki-NLP/opus-mt-ru-en.

    Returns the decoded translation as a plain string. Note: the tokenizer
    and model are (re)loaded on every call, as in the original; caching them
    at module level would avoid the repeated load. -- TODO consider caching.
    """
    mt_tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-ru-en")
    mt_model = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-ru-en")
    # prepare_seq2seq_batch was deprecated and removed from transformers;
    # calling the tokenizer directly is the supported API.
    batch = mt_tokenizer([text], return_tensors="pt")
    translated = mt_model.generate(**batch)
    # skip_special_tokens prevents "<pad>"/"</s>" markers leaking into output.
    return mt_tokenizer.decode(translated[0], skip_special_tokens=True)
def translate_to_russian(text):
    """Translate English *text* to Russian with Helsinki-NLP/opus-mt-en-ru.

    Returns the decoded translation as a plain string. The model is
    (re)loaded per call, mirroring the original implementation.
    """
    mt_tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-ru")
    mt_model = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-ru")
    # prepare_seq2seq_batch was deprecated and removed from transformers;
    # calling the tokenizer directly is the supported API.
    batch = mt_tokenizer([text], return_tensors="pt")
    translated = mt_model.generate(**batch)
    # skip_special_tokens prevents "<pad>"/"</s>" markers leaking into output.
    return mt_tokenizer.decode(translated[0], skip_special_tokens=True)
def generate_response(input_text):
    """Generate a reply to *input_text* with the module-level GPT-J model.

    Uses beam search (5 beams) with early stopping and returns the decoded
    text, special tokens stripped.
    """
    # Encode the prompt.
    input_ids = tokenizer.encode(input_text, return_tensors='pt')
    # BUG FIX: inputs must live on the same device as the model; the script
    # moves the model to CUDA when available, and generating from CPU
    # tensors then raises a device-mismatch RuntimeError.
    input_ids = input_ids.to(model.device)
    output = model.generate(input_ids, max_length=1500, num_return_sequences=1,
                            pad_token_id=50256,  # GPT-2/GPT-J eos token id
                            num_beams=5, early_stopping=True)
    # Decode and return the single generated sequence.
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    return response
# Smoke test of the generation pipeline.
# (Dropped the third redundant device/model.to() repetition that followed:
# the model was already placed on the selected device right after loading.)
input_text = "What is the capital of France?"
response = generate_response(input_text)
print(response)
user_input = "" | |
while user_input.lower() != 'exit': | |
user_input = input("You: ") | |
if user_input.lower() != 'exit': | |
user_input_english = translate_to_english(user_input) | |
response_english = generate_response(user_input_english) | |
response_russian = translate_to_russian(response_english) | |
print(f"Bot: {response_russian}") | |
import gradio as gr | |
def predict(message, history):
    """Gradio chat handler: RU message -> EN -> GPT-J reply -> RU.

    *history* is supplied by gr.ChatInterface and is not used here.
    """
    english_message = translate_to_english(message)
    english_reply = generate_response(english_message)
    return translate_to_russian(english_reply)
# Build and launch the Gradio chat UI around the predict() handler.
demo = gr.ChatInterface(
    predict,
    title='LLM for MyChatENRU',
)
demo.launch()