import gradio as gr
import pandas as pd
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer

# Initialize the Hugging Face text-generation pipeline with an open causal language model
model_name = "EleutherAI/gpt-neo-2.7B"  # Swap in any other causal LM from the Hub if desired
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
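# Note: this 2.7B-parameter model is slow to load and generate on CPU; if a GPU is
# available, passing device=0 to pipeline(...) moves inference onto it.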

def generate_solutions(query):
    # Sample several candidate solutions from the language model
    # (do_sample=True is required when requesting more than one return sequence)
    responses = generator(query, max_new_tokens=100, num_return_sequences=3, do_sample=True)
    
    # Wrap each generated text in a table row; the link is a placeholder to be replaced with a real reference
    solutions = [{"Solution": response['generated_text'].strip(), "Link": "https://example.com"} for response in responses]
    
    # Convert solutions to a DataFrame
    df = pd.DataFrame(solutions)
    
    # Convert DataFrame to HTML table with clickable links
    table_html = df.to_html(escape=False, index=False, render_links=True)
    
    return table_html

# Create a Gradio interface
iface = gr.Interface(
    fn=generate_solutions, 
    inputs=gr.Textbox(lines=2, placeholder="Describe the problem with the machine..."), 
    outputs=gr.HTML(),
    title="Oroz: Your Industry Maintenance Assistant",
    description="Describe the problem with your machine, and get an organized table of suggested solutions with web links."
)

iface.launch(share=True)
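
# Optional sanity check (a sketch, not part of the app): with the code above loaded
# in an interactive session, the handler can be called directly before using the UI, e.g.
#
#   html = generate_solutions("The conveyor motor overheats after ten minutes of use")
#   print(html)  # shows the HTML table that Gradio will render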