import gradio as gr

gr.load("models/microsoft/Phi-3.5-mini-instruct",  max_batch_size=1000).launch(share=True)




# def generate_response(user_input):
#     gr.load("models/microsoft/Phi-3.5-mini-instruct")

#     inputs = tokenizer(user_input, return_tensors="pt")
#     outputs = model.generate(**inputs)


# gradio_app = gr.Interface(
#     fn=generate_response,
#     inputs="text",
#     outputs="text",
#     max_batch_size=50,
#     title="Advertisement companion",
# )



# from transformers import AutoTokenizer, AutoModelForCausalLM
# import torch

# # Load the model and tokenizer
# tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3.5-mini-instruct", trust_remote_code=True)
# model = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3.5-mini-instruct", trust_remote_code=True)

# # Define the role prompt for advertisement assistance
# # role_prompt = "You are an advertisement assistant. Respond professionally and helpfully to advertising-related questions.\n\n"

# # Function to generate responses
# def generate_response(user_input):
#     # Prepend role information to user input (currently disabled above)
#     # input_text = user_input

#     # Tokenize and generate a response
#     inputs = tokenizer(user_input, return_tensors="pt")
#     outputs = model.generate(
#         **inputs,
#         max_new_tokens=100,   # Increase this if you want longer responses
#         do_sample=True,       # Enable sampling so top_p below takes effect
#         top_p=0.9,            # Nucleus sampling to control randomness
#     )

#     # Decode and return the first decoded sequence as a string
#     response = tokenizer.batch_decode(outputs, skip_special_tokens=True)
#     return response[0]

# # Set up Gradio interface
# interface = gr.Interface(
#     fn=generate_response,
#     inputs="text",
#     outputs="text",
#     title="Advertisement Assistant Chatbot",
#     description="Ask me anything related to advertising. I'm here to help!"
# )

# # Launch the Gradio app with sharing enabled
# interface.launch(share=True)
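
# A minimal sketch, assuming the tokenizer/model objects loaded in the draft
# above: Phi-3.5-mini-instruct is an instruct (chat) model, so prompts should
# normally go through the tokenizer's chat template rather than being tokenized
# as raw text. The name chat_generate is illustrative, not from the original.

# def chat_generate(user_input):
#     messages = [
#         {"role": "system", "content": "You are an advertisement assistant."},
#         {"role": "user", "content": user_input},
#     ]
#     # apply_chat_template builds the model's chat prompt and returns input ids
#     input_ids = tokenizer.apply_chat_template(
#         messages, add_generation_prompt=True, return_tensors="pt"
#     )
#     outputs = model.generate(input_ids, max_new_tokens=100)
#     # Decode only the newly generated tokens, skipping the echoed prompt
#     return tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)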


# import gradio as gr
# from transformers import pipeline

# # Load the model pipeline for text generation
# generator = pipeline("text-generation", model="microsoft/Phi-3.5-mini-instruct")

# # Define the role prompt for advertisement assistance
# role_prompt = "You are an advertisement assistant. Respond professionally and helpfully to advertising-related questions.\n\n"

# # Function to generate responses
# def generate_response(user_input):
#     input_text = role_prompt + user_input
#     response = generator(input_text, max_new_tokens=50, do_sample=True, temperature=0.7, top_p=0.9)
#     return response[0]["generated_text"]
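
# Note: by default the text-generation pipeline returns the prompt plus the
# completion in "generated_text"; passing return_full_text=False in the call
# above would return only the newly generated text.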

# # Set up Gradio interface
# interface = gr.Interface(
#     fn=generate_response,
#     inputs="text",
#     outputs="text",
#     title="Advertisement Assistant Chatbot",
#     description="Ask me anything related to advertising. I'm here to help!"
# )

# # Launch the Gradio app with sharing enabled
# interface.launch(share=True)


# import gradio as gr

# # Load the model using gr.load()
# model_interface = gr.load("models/microsoft/Phi-3.5-mini-instruct")

# # Create a wrapper interface to customize the appearance
# interface = gr.Interface(
#     fn=model_interface,
#     inputs="text",
#     outputs="text",
#     title="Advertisement Assistant Chatbot",
#     description="Ask me anything related to advertising. I'm here to help! This assistant provides professional guidance on advertising queries.",
#     theme="default",  # Optional: Choose a theme or style
# )

# # Launch with sharing enabled
# interface.launch(share=True)





# import gradio as gr
# from transformers import pipeline
# Run `huggingface-cli login` in a terminal first; meta-llama models are gated
# and require an authenticated Hugging Face account with access granted.

# text_generator = pipeline("text-generation", model="meta-llama/Llama-3.2-1B")

# def predict(input_text):
#     predictions = text_generator(input_text, max_new_tokens=50, num_return_sequences=1)
#     return predictions[0]["generated_text"]

# gradio_app = gr.Interface(
#     predict,
#     inputs=gr.Textbox(label="Enter text for generation"),
#     outputs=gr.Textbox(label="Generated Text"),
#     title="Text Generation Model",
#     description="This app generates text based on input prompts."
# )

# if __name__ == "__main__":
#     gradio_app.launch()