File size: 2,818 Bytes
9f9e124
a3c91ee
 
9f9e124
a3c91ee
 
 
 
9f9e124
a3c91ee
 
 
 
 
 
 
9f9e124
a3c91ee
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9f9e124
a3c91ee
 
9f9e124
a3c91ee
 
 
 
 
 
 
 
 
 
 
9f9e124
a3c91ee
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9f9e124
 
a3c91ee
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

class TextGenerator:
    """Wraps a Hugging Face causal language model for prompt-to-text generation."""

    def __init__(self, model_name, device='cpu'):
        # device must be set before load_model(), which moves the model to it.
        self.device = device
        self.load_model(model_name)

    def load_model(self, model_name):
        """Load the model and tokenizer from the Hugging Face hub onto self.device."""
        print("Loading model and tokenizer...")
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForCausalLM.from_pretrained(model_name)
        self.model.to(self.device)
        # Inference only: be explicit about eval mode (disables dropout etc.).
        self.model.eval()
        print("Model loaded successfully!")

    def generate_text(self, prompt, max_length=100, temperature=0.7, top_k=50, top_p=0.9):
        """Generate a sampled continuation of `prompt`.

        Args:
            prompt: Input text to continue.
            max_length: Total token budget (prompt + continuation); coerced to
                int so float values (e.g. from Gradio sliders) don't crash
                `model.generate`.
            temperature: Sampling temperature.
            top_k: Top-k sampling cutoff; coerced to int for the same reason.
            top_p: Nucleus-sampling probability mass.

        Returns:
            The decoded generated text (prompt included), special tokens stripped.
        """
        # Tokenize with an explicit attention mask. Encoding only input_ids
        # makes generate() warn and can mis-handle padding because
        # pad_token_id is set to eos_token_id below.
        encoded = self.tokenizer(prompt, return_tensors="pt").to(self.device)

        # Generate under no_grad: inference needs no autograd bookkeeping.
        with torch.no_grad():
            output_ids = self.model.generate(
                encoded["input_ids"],
                attention_mask=encoded["attention_mask"],
                max_length=int(max_length),
                temperature=float(temperature),
                top_k=int(top_k),
                top_p=float(top_p),
                do_sample=True,
                pad_token_id=self.tokenizer.eos_token_id,
            )

        # Decode the first (only) sequence in the batch.
        generated_text = self.tokenizer.decode(output_ids[0], skip_special_tokens=True)
        return generated_text

def create_gradio_interface(model_name):
    """Build a Gradio UI around a TextGenerator for the given hub model name.

    Args:
        model_name: Hugging Face hub identifier passed to TextGenerator.

    Returns:
        A configured (not yet launched) gr.Interface.
    """
    generator = TextGenerator(model_name)

    def generate(prompt, max_length, temperature, top_k, top_p):
        # Gradio sliders deliver floats; model.generate requires integer
        # values for max_length and top_k, so cast at the UI boundary.
        try:
            return generator.generate_text(
                prompt=prompt,
                max_length=int(max_length),
                temperature=temperature,
                top_k=int(top_k),
                top_p=top_p
            )
        except Exception as e:
            # Surface the error in the output box instead of crashing the UI.
            return f"Error: {str(e)}"

    # Define Gradio interface
    interface = gr.Interface(
        fn=generate,
        inputs=[
            gr.Textbox(label="Prompt", placeholder="Enter your prompt here..."),
            gr.Slider(minimum=10, maximum=500, value=100, step=10, label="Maximum Length"),
            gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
            gr.Slider(minimum=0, maximum=100, value=50, step=5, label="Top-k"),
            gr.Slider(minimum=0.1, maximum=1.0, value=0.9, step=0.1, label="Top-p"),
        ],
        outputs=gr.Textbox(label="Generated Text"),
        title="Reality123b/Xylaria-1.4-Senoa-Test",
        description="Generate text using the Reality123b/Xylaria-1.4-Senoa-Test model optimized for CPU usage.",
    )
    return interface

if __name__ == "__main__":
    # Hub identifier of the model to serve.
    hub_model_id = "Reality123b/Xylaria-1.4-Senoa-Test"

    # Build the UI and expose it with a public share link.
    demo = create_gradio_interface(hub_model_id)
    demo.launch(share=True)