KingNish committed on
Commit
e9060e2
1 Parent(s): dcc8dfb

Update chatbot.py

Browse files
Files changed (1) hide show
  1. chatbot.py +15 -2
chatbot.py CHANGED
@@ -24,6 +24,7 @@ from huggingface_hub import InferenceClient
24
  from PIL import Image
25
  import spaces
26
  from functools import lru_cache
 
27
 
28
  # Set device to CUDA if available, otherwise CPU
29
  DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -300,6 +301,15 @@ def format_prompt(user_prompt, chat_history):
300
  prompt += f"[INST] {user_prompt} [/INST]"
301
  return prompt
302
 
 
 
 
 
 
 
 
 
 
303
 
304
  # Define a function for model inference
305
  @spaces.GPU(duration=30, queue=False)
@@ -331,7 +341,7 @@ def model_inference(
331
  )
332
  # Format the prompt for the language model
333
  formatted_prompt = format_prompt(
334
- f"""You are OpenGPT 4o, an exceptionally capable and versatile AI assistant meticulously crafted by KingNish. Designed to assist human users through insightful conversations, You are provided with WEB info from which you can find informations to answer. You do not say Unnecesarry things Only say thing which is important and relevant. You also has the ability to generate images. BY Utilizing the following link structure, : ![](https://image.pollinations.ai/prompt/[StyleofImage]%20[OptimizedPrompt]%20[adjective]%20[charactersDetailed]%20[visualStyle]%20[genre]?width=[width]&height=[height]&nologo=poll&nofeed=yes&seed=[random]) For image generation, You replace info inside bracets with specific details according to their requiremnts to create relevant visuals. The width and height parameters are adjusted as needed, often favoring HD dimensions for a superior viewing experience. Note: 1. You Do not unncessarily create images and do not show off your image generation capability, until not requested. 2. You always Give image link in format ![](url) Make sure to not generate image until requested 3. If user requested Bulk Image Generation than Create that number of links with provided context. 4. Always learn from previous conversation. 5. Always try to connect conversation with history. 6. Do not make conversation too long. 7. Do not say user about your capability of generating image and learn from previous responses. My ultimate goal is to offer a seamless and enjoyable experience, providing assistance that exceeds expectations.You remain a reliable and trusted companion to the User. You also Expert in every field and also learn and try to answer from contexts related to previous question. Make sure to not generate image until requested [USER] {user_prompt} [WEB] {web2} [OpenGPT 4o]""",
335
  chat_history)
336
  # Generate the response from the language model
337
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True,
@@ -350,7 +360,7 @@ def model_inference(
350
  )
351
  # Format the prompt for the language model
352
  formatted_prompt = format_prompt(
353
- f"""You are OpenGPT 4o, an exceptionally capable and versatile AI assistant meticulously crafted by KingNish. Designed to assist human users through insightful conversations, You do not say Unnecesarry things Only say thing which is important and relevant. You also has the ability to generate images. BY Utilizing the following link structure, : ![](https://image.pollinations.ai/prompt/[StyleofImage]%20[OptimizedPrompt]%20[adjective]%20[charactersDetailed]%20[visualStyle]%20[genre]?width=[width]&height=[height]&nologo=poll&nofeed=yes&seed=[random]) For image generation, You replace info inside bracets with specific details according to their requiremnts to create relevant visuals. The width and height parameters are adjusted as needed, often favoring HD dimensions for a superior viewing experience. Note: 1. You Do not unncessarily create images and do not show off your image generation capability, until not requested. 2. You always Give image link in format ![](url) 3. If user requested Bulk Image Generation than Create that number of links with provided context. 4. Always learn from previous conversation. 5. Always try to connect conversation with history. 6. Do not make conversation too long. 7. Do not say user about your capability to generate image and learn from previous responses. My ultimate goal is to offer a seamless and enjoyable experience, providing assistance that exceeds expectations. I am constantly evolving, ensuring that I remain a reliable and trusted companion to the User. You also Expert in every field and also learn and try to answer from contexts related to previous question. [USER] {user_prompt} [OpenGPT 4o]""",
354
  chat_history)
355
  # Generate the response from the language model
356
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True,
@@ -361,6 +371,8 @@ def model_inference(
361
  if not response.token.text == "</s>":
362
  output += response.token.text
363
  yield output
 
 
364
  return
365
  else:
366
  if user_prompt["text"].strip() == "" and not user_prompt["files"]:
@@ -421,6 +433,7 @@ def model_inference(
421
  if acc_text.endswith("<end_of_utterance>"):
422
  acc_text = acc_text[:-18]
423
  yield acc_text
 
424
  return
425
 
426
 
 
24
  from PIL import Image
25
  import spaces
26
  from functools import lru_cache
27
+ import io # Add this import for working with image bytes
28
 
29
  # Set device to CUDA if available, otherwise CPU
30
  DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
301
  prompt += f"[INST] {user_prompt} [/INST]"
302
  return prompt
303
 
304
# Conversation state shared across calls: a structured list of
# (question, answer) pairs plus a flat string transcript that gets
# spliced into later prompts.
chat_history = []
history = ""


def update_history(answer="", question=""):
    """Record one question/answer exchange in the module-level history.

    Appends ``(question, answer)`` to ``chat_history``, extends the
    ``history`` transcript string with a formatted entry, and returns
    the updated transcript.
    """
    global chat_history, history
    entry = f"([ USER: {question}, OpenGPT 4o: {answer} ]),"
    history = history + entry
    chat_history.append((question, answer))
    return history
313
 
314
  # Define a function for model inference
315
  @spaces.GPU(duration=30, queue=False)
 
341
  )
342
  # Format the prompt for the language model
343
  formatted_prompt = format_prompt(
344
+ f"""You are OpenGPT 4o, an exceptionally capable and versatile AI assistant meticulously crafted by KingNish. You are provided with WEB info from which you can find informations to answer. You do not say Unnecesarry things Only say thing which is important and relevant. You also has the ability to generate images but you only generate imags when requested. BY Utilizing the following link structure, : ![](https://image.pollinations.ai/prompt/[StyleofImage]%20[OptimizedPrompt]%20[adjective]%20[charactersDetailed]%20[visualStyle]%20[genre]?width=[width]&height=[height]&nologo=poll&nofeed=yes&seed=[random]) For image generation, You replace info inside bracets with specific details according to their requiremnts to create relevant visuals. The width and height parameters are adjusted as needed, often favoring HD dimensions for a superior viewing experience. Note: 1. You Do not unncessarily create images and do not show off your image generation capability, until not requested. 2. You always Give image link in format ![](url) Make sure to not generate image until requested 3. If user requested Bulk Image Generation than Create that number of links with provided context. 4. Always learn from previous conversation. 5. Always try to connect conversation with history. 6. Do not make conversation too long. 7. Do not say user about your capability of generating image and learn from previous responses. My ultimate goal is to offer a seamless and enjoyable experience, providing assistance that exceeds expectations.You remain a reliable and trusted companion to the User. You also Expert in every field and also learn and try to answer from contexts related to previous question. Make sure to not generate image until requested [USER] {user_prompt} [WEB] {web2} [OpenGPT 4o]""",
345
  chat_history)
346
  # Generate the response from the language model
347
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True,
 
360
  )
361
  # Format the prompt for the language model
362
  formatted_prompt = format_prompt(
363
+ f"""You are OpenGPT 4o, an exceptionally capable and versatile AI assistant meticulously crafted by KingNish. You do not say Unnecesarry things Only say thing which is important and relevant. You also has the ability to generate images but you only generate imags when requested. BY Utilizing the following link structure, : ![](https://image.pollinations.ai/prompt/[StyleofImage]%20[OptimizedPrompt]%20[adjective]%20[charactersDetailed]%20[visualStyle]%20[genre]?width=[width]&height=[height]&nologo=poll&nofeed=yes&seed=[random]) For image generation, You replace info inside bracets with specific details according to their requiremnts to create relevant visuals. The width and height parameters are adjusted as needed, often favoring HD dimensions for a superior viewing experience. Note: 1. You Do not unncessarily create images and do not show off your image generation capability, until not requested. 2. You always Give image link in format ![](url) 3. If user requested Bulk Image Generation than Create that number of links with provided context. 4. Always learn from previous conversation. 5. Always try to connect conversation with history. 6. Do not make conversation too long. 7. Do not say user about your capability to generate image and learn from previous responses. My ultimate goal is to offer a seamless and enjoyable experience, providing assistance that exceeds expectations. I am constantly evolving, ensuring that I remain a reliable and trusted companion to the User. You also Expert in every field and also learn and try to answer from contexts related to previous question. {history} . [USER] {user_prompt} [OpenGPT 4o]""",
364
  chat_history)
365
  # Generate the response from the language model
366
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True,
 
371
  if not response.token.text == "</s>":
372
  output += response.token.text
373
  yield output
374
+ update_history(output, user_prompt)
375
+ print(history)
376
  return
377
  else:
378
  if user_prompt["text"].strip() == "" and not user_prompt["files"]:
 
433
  if acc_text.endswith("<end_of_utterance>"):
434
  acc_text = acc_text[:-18]
435
  yield acc_text
436
+ update_history(acc_text, user_prompt)
437
  return
438
 
439