agentharbor committed on
Commit 876f61f
1 Parent(s): 9ecf488

Update app.py

Files changed (1)
  1. app.py +51 -13
app.py CHANGED
@@ -4,7 +4,11 @@ from huggingface_hub import InferenceClient
 """
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 """
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+#client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+import google.generativeai as genai
+os.environ["API_KEY"] = 'AIzaSyB8Hj4oCbBH9arFWSgybHnbpZLs2sa4p1w'
+os.environ["GOOGLE_API_KEY"] = 'AIzaSyBjuYTWBlHg4W2wGaQCKKbigz6deZuLUJc'
+genai.configure(api_key=os.environ["API_KEY"])
 
 global context
 
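For reference, a minimal sketch of the same `google.generativeai` setup that reads the key from an environment variable (for example a Space secret) instead of hard-coding it in the source; the `GOOGLE_API_KEY` secret name and the error handling are assumptions, not part of this commit:

import os
import google.generativeai as genai

# Assumption: the key is supplied as a Space secret / env var named GOOGLE_API_KEY,
# rather than written into app.py as in the diff above.
api_key = os.environ.get("GOOGLE_API_KEY")
if not api_key:
    raise RuntimeError("GOOGLE_API_KEY is not set")
genai.configure(api_key=api_key)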
@@ -383,6 +387,49 @@ Link: https://www.deeplearning.ai/short-courses/ai-agents-in-langgraph/
 
 '''
 
+os.environ["LANGCHAIN_API_KEY"] = "ls__92a67c6930624f93aa427f1c1ad3f59b"
+os.environ["LANGCHAIN_TRACING_V2"] = "true"
+os.environ["LANGCHAIN_PROJECT"] = "agenta"
+generation_config = {
+    "temperature": 0.2,
+    "top_p": 0.95,
+    "top_k": 0,
+    "max_output_tokens": 8192,
+}
+
+safety_settings = [
+    {
+        "category": "HARM_CATEGORY_HARASSMENT",
+        "threshold": "BLOCK_MEDIUM_AND_ABOVE"
+    },
+    {
+        "category": "HARM_CATEGORY_HATE_SPEECH",
+        "threshold": "BLOCK_MEDIUM_AND_ABOVE"
+    },
+    {
+        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+        "threshold": "BLOCK_MEDIUM_AND_ABOVE"
+    },
+    {
+        "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+        "threshold": "BLOCK_MEDIUM_AND_ABOVE"
+    },
+]
+
+system_instruction = context
+import re
+
+model = genai.GenerativeModel(model_name="gemini-1.5-pro-latest",
+                              generation_config=generation_config,
+                              system_instruction=system_instruction,
+                              safety_settings=safety_settings)
+
+
+def model_response(text):
+    #model = genai.GenerativeModel('gemini-pro')
+    response = model.generate_content(text)
+    return response.text
+
 def respond(
     message,
     history: list[tuple[str, str]],
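As a usage sketch only, here is how the new `model_response` helper could be exercised on its own, assuming `genai.configure(...)` has already run and the `context` string is defined earlier in app.py; the prompt text is purely illustrative and not part of the commit:

# Illustrative call; assumes the module-level `model` above was built successfully.
reply = model_response("Which short courses does the assistant know about?")
print(reply)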
@@ -397,19 +444,10 @@ def respond(
 
     messages.append({"role": "user", "content": message})
 
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=768,
-        stream=True,
-        temperature=0.3,
-        top_p=0.3,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
+    #response = ""
+    response = model_response(messages)
+    return response
+
 
 """
 For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
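One caveat worth noting about this hunk: `respond` still builds OpenAI-style `{"role", "content"}` dicts, while Gemini's `generate_content` expects plain text or `role`/`parts` content, so the list may need flattening before the call to `model_response`. A sketch under that assumption (the `flatten_messages` helper is hypothetical, not part of the commit):

def flatten_messages(messages):
    # Hypothetical helper: join role-tagged chat messages into one prompt string
    # so it can be passed to model.generate_content via model_response.
    return "\n".join(f"{m['role']}: {m['content']}" for m in messages)

# Inside respond(), instead of passing the raw dict list:
# response = model_response(flatten_messages(messages))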
 
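For completeness, a minimal sketch of how `respond` is typically wired into a Gradio ChatInterface, as the docstring above suggests; the actual ChatInterface configuration lives further down in app.py and is not shown in this diff, so the details here are assumptions:

import gradio as gr

# Hedged sketch only: launch arguments and UI options are not part of this commit.
demo = gr.ChatInterface(respond)

if __name__ == "__main__":
    demo.launch()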