Update app.py
app.py CHANGED
@@ -3,6 +3,9 @@ from transformers import AutoTokenizer
 from langchain.chat_models import ChatOpenAI
 import os, sys, json
 import gradio as gr
+from langchain.evaluation import load_evaluator
+from pprint import pprint as print
+
 
 # access token with permission to access the model and PRO subscription
 #HUGGINGFACEHUB_API_TOKEN = os.getenv("HF_ACCESS_READ")
@@ -28,16 +31,20 @@ def generate(text, history):
         top_p=0.9,
         temperature=0.6,
     )
-
+    # for evaluation:
+    eval_result = evaluator.evaluate_strings(prediction=pred, input=prompt)
+    print("eval_result:............ ")
+    print(eval_result)
     return res.strip()
 
-
-#
+########################################
+#Evaluation
+########################################
+evaluation_llm = ChatOpenAI(model="gpt-4")
 
 # create evaluator
-
+evaluator = load_evaluator("criteria", criteria="conciseness", llm=evaluation_llm)
 
-evaluation_llm = ChatOpenAI(model="gpt-4")
 
 ################################################
 #GUI