Spencer525 committed on
Commit
309db3a
·
verified ·
1 Parent(s): 1295662

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -7
app.py CHANGED
@@ -9,7 +9,6 @@ import torch
9
  from transformers import AutoTokenizer, AutoModelForCausalLM
10
  from PIL import Image
11
  import io
12
- from langsmith import LangSmith
13
 
14
  # Configure Gemini API
15
  genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
@@ -24,8 +23,10 @@ mistral_model = AutoModelForCausalLM.from_pretrained(model_path_mistral, torch_d
24
  openelm_270m_instruct = AutoModelForCausalLM.from_pretrained("apple/OpenELM-1_1B", trust_remote_code=True)
25
  tokenizer = AutoTokenizer.from_pretrained("NousResearch/Llama-2-7b-hf")
26
 
27
- # LangSmith setup
28
- langsmith = LangSmith(api_key=os.getenv("LANGSMITH_API_KEY"))
 
 
29
 
30
  def process_pdf(file_path, question):
31
  model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
@@ -59,10 +60,6 @@ def generate(newQuestion, num):
59
  output_text = tokenizer.decode(output_ids[0].tolist(), skip_special_tokens=True)
60
  return output_text
61
 
62
- def evaluate_with_langsmith(text):
63
- # Hypothetical evaluation logic using LangSmith
64
- return langsmith.evaluate_text(text)
65
-
66
  def process_input(file, image, question, gen_length):
67
  try:
68
  if file is not None:
 
9
  from transformers import AutoTokenizer, AutoModelForCausalLM
10
  from PIL import Image
11
  import io
 
12
 
13
  # Configure Gemini API
14
  genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
 
23
  openelm_270m_instruct = AutoModelForCausalLM.from_pretrained("apple/OpenELM-1_1B", trust_remote_code=True)
24
  tokenizer = AutoTokenizer.from_pretrained("NousResearch/Llama-2-7b-hf")
25
 
26
# Replacement for the removed LangSmith evaluation hook.
def evaluate_with_langsmith(text):
    """Score generated text by its whitespace-separated word count.

    Stand-in for the dropped LangSmith dependency: splits *text* on
    whitespace and reports the token count as a formatted string.
    """
    score = len(text.split())
    return f"Score: {score}"
30
 
31
  def process_pdf(file_path, question):
32
  model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
 
60
  output_text = tokenizer.decode(output_ids[0].tolist(), skip_special_tokens=True)
61
  return output_text
62
 
 
 
 
 
63
  def process_input(file, image, question, gen_length):
64
  try:
65
  if file is not None: