"""Gradio interface for running RGB evaluations (noise robustness / accuracy,
rejection rate, information integration, and counterfactual checking) against
models served through the Groq API."""

import time
import gradio as gr
import os
import json
import random
import yaml
import tqdm
from utils import processdata, predict, supported_models
from OpenAIAPIModel import GroqOpenAIAPIModel
import utils
import requests
import pandas as pd


def check_counterfactual(question, answer, url, apikey):
    prompt = '''Please determine whether the model can identify factual errors in the document. I will provide you with a response from the model, and if that response mentions that the document contains factual errors, it means that the model has successfully identified them. Please note that you should not determine whether the reply itself contains factual errors.
Demonstrations:
Response:文档信息有误,2008年的奥运会在北京举办。
Yes, the model has identified the factual errors.

Response:2014年澳网女单冠军是大阪直美。
NO, the model fails to identify the factual errors.

Response: The director of the Silent Hill movie is Justin Kurzel.
NO, the model fails to identify the factual errors.

Response: Harry Potter is written by J. K. Rowling.
NO, the model fails to identify the factual errors.

Response: There are factual errors in the provided documents. The correct answer is 2023.
Yes, the model has identified the factual errors.

Begin to generate:
Answer: {answer}
'''
    text2 = prompt.format(answer=answer)
    return get_groq_response(text2, apikey)


def check(question, answer, url, apikey):
    prompt = '''I will give you a question and an answer generated through document retrieval. Please use this answer to determine if the retrieved document can solve the question.
Demonstrations:
Question: 2023年澳网女单冠军是谁
Answer:文档信息不足,因此我无法基于提供的文档回答该问题。
No, the question is not addressed by the documents.

Question: Who is the champion of Australian Open 2023 Women's Singles?
Answer: Serena Williams
Yes, the question is addressed by the documents.

Question: Where is ACL2023 held?
Answer: Location of ACL2023 has not been confirmed.
No, the question is not addressed by the documents.

Question: 2023年中国GDP是多少?
Answer: I can not answer this question。
No, the question is not addressed by the documents.

Begin to generate:
Question: {question}
Answer: {answer}
'''
    text2 = prompt.format(question=question, answer=answer)
    return get_groq_response(text2, apikey)


def get_groq_response(prompt, api_key):
    if api_key == "":
        api_key = os.environ.get("GROQ_API_KEY")  # Safely loaded from HF Secrets
    url = "https://api.groq.com/openai/v1/chat/completions"
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json"
    }
    data = {
        "model": "llama-3.3-70b-versatile",
        "messages": [{"role": "user", "content": prompt}],
        "temperature": 0.7
    }
    for attempt in range(3):
        try:
            response = requests.post(url, json=data, headers=headers)
            response.raise_for_status()  # Raise HTTP errors
            json_response = response.json()
            if "choices" not in json_response:
                print(f"Unexpected response format: {json_response}")
                return "Error: Invalid API response format."
            return json_response["choices"][0]["message"]["content"]
        except requests.exceptions.RequestException as e:
            print(f"Attempt {attempt + 1} failed: {e}")
            time.sleep(2)  # Backoff before retry
    return "Error: Max retries reached."
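
# --- Illustrative usage (commented out; a minimal sketch) ---------------------
# Assuming GROQ_API_KEY is set in the environment, the judge helpers above can
# be called directly, e.g.:
#
#     verdict = check(
#         question="Who is the champion of Australian Open 2023 Women's Singles?",
#         answer="Aryna Sabalenka",
#         url="https://api.groq.com/openai/v1/chat/completions",
#         apikey="",
#     )
#     print(verdict)  # expected to start with "Yes, ..." or "No, ..."
# -------------------------------------------------------------------------------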


def run_reject_rate(
    modelname='chatgpt',
    dataset='en',
    api_key='api_key',
    url='https://api.openai.com/v1/completions',
    temperature=0.7,
    noise_rate=0.0,
    correct_rate=0.0,
    passage_num=5,
    factchecking=False,
    max_instances=2
):
    # Result path (in working dir)
    resultpath = 'results/result-en' if 'en' in dataset else 'results/result-zh'
    modelname = modelname.replace('/', '_')  # Replace '/' with '_' for file naming

    # NOTE: the file names below hard-code noise rate 1 and correct rate 0,
    # i.e. the rejection metric is read from the all-noise prediction files.
    evaluefile = f'{resultpath}/prediction_{dataset}_{modelname}_temp{temperature}_noise{1}_passage{passage_num}_correct{0}.json'
    outputfile = f'{resultpath}/prediction_{dataset}_{modelname}_temp{temperature}_noise{1}_passage{passage_num}_correct{0}_chatgpt.json'
    resultfile = f'{resultpath}/prediction_{dataset}_{modelname}_temp{temperature}_noise{1}_passage{passage_num}_correct{0}_chatgptresult.json'

    results = []
    useddata = {}
    if os.path.exists(outputfile):
        with open(outputfile) as f:
            for line in f:
                data = json.loads(line)
                useddata[data['id']] = data

    with open(outputfile, 'w', encoding='utf-8') as f:
        with open(evaluefile, 'r', encoding='utf-8') as f2:
            for line in tqdm.tqdm(f2):
                data = json.loads(line)
                if data['id'] in useddata and data['query'] == useddata[data['id']]['query'] and data['ans'] == useddata[data['id']]['ans']:
                    results.append(useddata[data['id']])
                    f.write(json.dumps(useddata[data['id']], ensure_ascii=False) + '\n')
                    continue
                try:
                    question = data['query']
                    answer = data['prediction']
                    evaluation = check(question, answer, url, api_key)
                    data['evaluation'] = evaluation
                    results.append(data)
                    f.write(json.dumps(data, ensure_ascii=False) + '\n')
                except Exception as e:
                    print(f"Exception Generated: {e}")
                    print(f"Question: {question}, Answer: {answer}")
                    continue

    rejecttt = 0
    tt = 0
    for i in results:
        if "not addressed" in i['evaluation']:
            rejecttt += 1
        if 0 not in i['label'] and 1 in i['label']:
            tt += 1
    print(tt / len(results))

    scores = {
        'reject_rate': rejecttt / len(results),
        'all_rate': tt / len(results),
        'tt': tt,
        'rejecttt': rejecttt,
        'nums': len(results),
    }

    try:
        utils.upload_file(outputfile, "")
    except Exception as e:
        print(f"Error saving outputfile {outputfile}:", e)

    # Save results
    try:
        finalResults = {
            'model': modelname,
            'dataset': dataset,
            'temperature': temperature,
            'noise_rate': noise_rate,
            'passage_num': passage_num,
            'correct_rate': correct_rate,
            'factchecking': factchecking,
            'scores': scores,
        }
        with open(resultfile, 'w') as f:
            json.dump(finalResults, f, ensure_ascii=False, indent=4)
        utils.upload_file(resultfile, "")
    except Exception as e:
        print("Error saving scores:", e)

    return finalResults


def run_information_integration(
    modelname='chatgpt',
    dataset='en_int',
    api_key='api_key',
    url='https://api.openai.com/v1/completions',
    temperature=0.7,
    noise_rate=0.0,
    correct_rate=0.0,
    passage_num=5,
    factchecking=False,
    max_instances=2
):
    # Information integration reuses the standard evaluation on the `en_int` dataset.
    return run_evaluation(
        modelname=modelname,
        dataset="en_int",
        api_key=api_key,
        url=url,
        temperature=temperature,
        noise_rate=noise_rate,
        correct_rate=correct_rate,
        passage_num=passage_num,
        factchecking=factchecking,
        max_instances=max_instances
    )


def run_counter_factual_checking(
    modelname='chatgpt',
    dataset='en_fact',
    api_key='api_key',  # API key for the model
    url='https://api.openai.com/v1/completions',
    temperature=0.7,
    noise_rate=0.0,
    correct_rate=0.0,
    passage_num=5,
    factchecking=False,
    max_instances=2
):
    resultpath = 'results/result-en' if 'en' in dataset else 'results/result-zh'
    modelname = modelname.replace('/', '_')  # Replace '/' with '_' for file naming

    evaluefile = f'{resultpath}/prediction_{dataset}_{modelname}_temp{temperature}_noise{noise_rate}_passage{passage_num}_correct{correct_rate}.json'
    outputfile = f'{resultpath}/prediction_{dataset}_{modelname}_temp{temperature}_noise{noise_rate}_passage{passage_num}_correct{correct_rate}_chatgpt.json'
    resultfile = f'{resultpath}/prediction_{dataset}_{modelname}_temp{temperature}_noise{noise_rate}_passage{passage_num}_correct{correct_rate}_chatgptresult.json'

    results = []
    useddata = {}
    if os.path.exists(outputfile):
        with open(outputfile) as f:
            for line in f:
                data = json.loads(line)
                useddata[data['id']] = data

    with open(outputfile, 'w', encoding='utf-8') as f:
        with open(evaluefile, 'r', encoding='utf-8') as f2:
            for line in tqdm.tqdm(f2):
                data = json.loads(line)
                if data['id'] in useddata:
                    results.append(useddata[data['id']])
                    f.write(json.dumps(useddata[data['id']], ensure_ascii=False) + '\n')
                    continue
                try:
                    question = data['query']
                    answer = data['prediction']
                    evaluation = check_counterfactual(question, answer, url, api_key)
                    data['evaluation'] = evaluation
                    results.append(data)
                    f.write(json.dumps(data, ensure_ascii=False) + '\n')
                except Exception as e:
                    print(e)
                    print(question, answer)
                    continue

    rejecttt = 0
    tt = 0
    correct_tt = 0
    for i in results:
        if "has identified" in i['evaluation'] or "Yes" in i['evaluation']:
            rejecttt += 1
            if 0 not in i['label'] and 1 in i['label']:
                correct_tt += 1
        if 0 not in i['label'] and 1 in i['label']:
            tt += 1
    print(tt / len(results))

    scores = {
        'reject_rate': rejecttt / len(results),  # ED*
        'all_rate': tt / len(results),
        'correct_rate': correct_tt / rejecttt if rejecttt > 0 else 0,  # CR
        'tt': tt,
        'rejecttt': rejecttt,
        'correct_tt': correct_tt,
        'nums': len(results),
        'noise_rate': noise_rate,
    }
    # In these scores, "reject_rate" is the error detection rate (ED*) and
    # "correct_rate" is the error correction rate (CR).
    try:
        utils.upload_file(outputfile, "")
    except Exception as e:
        print(f"Error saving outputfile {outputfile}:", e)

    # Save results
    try:
        finalResults = {
            'model': modelname,
            'dataset': dataset,
            'temperature': temperature,
            'noise_rate': noise_rate,
            'passage_num': passage_num,
            'correct_rate': correct_rate,
            'factchecking': factchecking,
            'scores': scores,
        }
        with open(resultfile, 'w') as f:
            json.dump(finalResults, f, ensure_ascii=False, indent=4)
        utils.upload_file(resultfile, "")
    except Exception as e:
        print("Error saving scores:", e)

    return finalResults


def run_evaluation(
    modelname='chatgpt',
    dataset='en',
    api_key='api_key',
    url='https://api.openai.com/v1/completions',
    temperature=0.7,
    noise_rate=0.0,
    correct_rate=0.0,
    passage_num=5,
    factchecking=False,
    max_instances=2
):
    print(f"Running evaluation with parameters: modelname={modelname}, "
          f"dataset={dataset}, api_key={api_key}, url={url}, temperature={temperature}, "
          f"noise_rate={noise_rate}, correct_rate={correct_rate}, passage_num={passage_num}, "
          f"factchecking={factchecking}, max_instances={max_instances}")

    # Paths
    dataset_path = f"data/{dataset}.json"
    prompt_file = "config/instruction.yaml"
    prompt_fact_file = "config/instruction_fact.yaml"

    # Load dataset, limited to the first `max_instances` lines
    instances = []
    with open(dataset_path, 'r') as f:
        for i, line in enumerate(f):
            if i >= max_instances:
                break
            instances.append(json.loads(line))

    # Result path (in working dir)
    resultpath = 'results/result-en' if 'en' in dataset else 'results/result-zh'
    if not os.path.exists(resultpath):
        os.makedirs(resultpath)

    # Load prompt
    if factchecking:
        prompt = yaml.load(open(prompt_fact_file, 'r'), Loader=yaml.FullLoader)[dataset[:2]]
        resultpath = os.path.join(resultpath, 'fact')
        if not os.path.exists(resultpath):
            os.makedirs(resultpath)
    else:
        prompt = yaml.load(open(prompt_file, 'r'), Loader=yaml.FullLoader)[dataset[:2]]

    system = prompt['system']
    instruction = prompt['instruction']

    if api_key == "":
        api_key = os.environ.get("GROQ_API_KEY")  # Safely loaded from HF Secrets
    model = GroqOpenAIAPIModel(api_key=api_key, url=url, model=modelname)
    print(f"Model Created Name: {model}")

    modelname = modelname.replace('/', '_')  # Replace '/' with '_' for file naming

    # Output file
    output_file = f"prediction_{dataset}_{modelname}_temp{temperature}_noise{noise_rate}_passage{passage_num}_correct{correct_rate}.json"
    print(f"Output File: {output_file}")

    # Previously used predictions
    useddata = {}
    complete_output_file = os.path.join(resultpath, output_file)
    if os.path.exists(complete_output_file):
        with open(complete_output_file) as f:
            for line in f:
                data = json.loads(line)
                useddata[data['id']] = data

    # Inference loop
    results = []
    with open(complete_output_file, 'w') as f:
        for instance in tqdm.tqdm(instances):
            if instance['id'] in useddata and instance['query'] == useddata[instance['id']]['query'] and instance['answer'] == useddata[instance['id']]['ans']:
                results.append(useddata[instance['id']])
                f.write(json.dumps(useddata[instance['id']], ensure_ascii=False) + '\n')
                print("Found information in useddata")
                continue
            try:
                random.seed(2333)
                if passage_num == 0:
                    query = instance['query']
                    ans = instance['answer']
                    docs = []
                else:
                    query, ans, docs = processdata(instance, noise_rate, passage_num, dataset, correct_rate)
                print(f"Results: \n*********query: {query}, \n*********Answer: {ans}, \n")
                label, prediction, factlabel = predict(query, ans, docs, model, system, instruction, temperature, dataset)
dataset) print(f"******** Label: {label}\n******** Prediction: {prediction}\n******** factlabel: {factlabel}\n ******** \n") newinstance = { 'id': instance['id'], 'query': query, 'ans': ans, 'label': label, 'prediction': prediction, 'docs': docs, 'noise_rate': noise_rate, 'factlabel': factlabel } # print(f"*********Newinstances: {newinstance}") results.append(newinstance) f.write(json.dumps(newinstance, ensure_ascii=False) + '\n') except Exception as e: print("Error:", e) continue # Scoring tt = 0 for i in results: label = i['label'] if noise_rate == 1 and label[0] == -1: tt += 1 elif 0 not in label and 1 in label: tt += 1 scores = { 'all_rate': tt / len(results), 'noise_rate': noise_rate, 'tt': tt, 'nums': len(results) } if '_fact' in dataset: fact_tt = 0 correct_tt = 0 for i in results: if i['factlabel'] == 1: fact_tt += 1 if 0 not in i['label']: correct_tt += 1 fact_check_rate = fact_tt / len(results) correct_rate = correct_tt / fact_tt if fact_tt > 0 else 0 scores.update({ 'fact_check_rate': fact_check_rate, 'correct_rate': correct_rate, 'fact_tt': fact_tt, 'correct_tt': correct_tt }) print(f"Output File: {output_file}") print(f"Complete Output File: {complete_output_file}") # Upload results to Hugging Face Hub try: print(f"Uploading {complete_output_file} to Hugging Face Hub...") upload_file = utils.upload_file(complete_output_file, "") if upload_file: print(f"File {complete_output_file} uploaded successfully to Hugging Face Hub.") else: print(f"Failed to upload {complete_output_file} to Hugging Face Hub.") except Exception as e: print(f"Error uploading file: {e}") # Save results try: finalResults = { 'model': modelname, 'dataset': dataset, 'temperature': temperature, 'noise_rate': noise_rate, 'passage_num': passage_num, 'correct_rate': correct_rate, 'factchecking': factchecking, 'scores': scores, } score_file = f"{output_file[:-5]}_result.json" with open(score_file, 'w') as f: json.dump(finalResults, f, ensure_ascii=False, indent=4) utils.upload_file(score_file, resultpath) # print(f"Scores saved to {score_file} and uploaded to Hugging Face Hub.") except Exception as e: print("Error saving scores:", e) # with open(score_file, 'w') as f: # json.dump(scores, f, ensure_ascii=False, indent=4) print(f"Final Results : {finalResults}") return finalResults with gr.Blocks() as demo: gr.Markdown("## 🧪 RGB Evaluation Interface") with gr.Row(): with gr.Column(): with gr.Group(): with gr.Row(): with gr.Column(): gr.Markdown("### Model and Dataset Configuration") modelname = gr.Dropdown(choices=supported_models, value="llama-3.1-8b-instant", label="Model Name") dataset = gr.Dropdown(choices=["en", "en_int", "en_fact", "zh"], value="en", label="Dataset", interactive=True) with gr.Column(): gr.Markdown("### API Configuration") api_key = gr.Textbox(label="API Key", type="password") url = gr.Textbox(label="API URL", value="https://api.groq.com/openai/v1/chat/completions") with gr.Column(): with gr.Group(): gr.Markdown("### Evaluation Parameters") with gr.Row(): with gr.Column(): temperature = gr.Slider(0.0, 1.5, step=0.1, value=0.7, label="Temperature") noise_rate = gr.Slider(0.0, 1.0, step=0.1, value=0.2, label="Noise Rate") max_instances = gr.Slider(1, 300, step=1, value=2, label="Max Instances to Evaluate") with gr.Column(): correct_rate = gr.Slider(0.0, 1.0, step=0.1, value=0.2, label="Correct Passage Rate") passage_num = gr.Slider(0, 10, step=1, value=5, label="Number of Passages") factchecking = gr.Checkbox(label="Enable Fact Checking") with gr.Row(): with gr.Column(): gr.Markdown("### Run 
Evaluation Scripts") with gr.Group(): with gr.Row(): run_evalue_button = gr.Button("🚀 Run (evalue.py) - Noise Accuracy") with gr.Group(): with gr.Row(): run_reject_button = gr.Button("🚀 Run (reject_evalue.py) - Reject Rate") with gr.Group(): with gr.Row(): run_information_button = gr.Button("🚀 Run (evalue.py) - Information Integration") with gr.Group(): with gr.Row(): run_fact_button = gr.Button("🚀 Run (fact_evalue.py) - Counterfactual Checking") with gr.Column(): gr.Markdown("### Output") output = gr.JSON(label="Output", value={}) run_evalue_button.click( run_evaluation, inputs=[ modelname, dataset, api_key, url, temperature, noise_rate, correct_rate, passage_num, factchecking, max_instances ], outputs=[output] ) run_reject_button.click( run_reject_rate, inputs=[ modelname, dataset, api_key, url, temperature, noise_rate, correct_rate, passage_num, factchecking, max_instances ], outputs=[output] ) run_information_button.click( run_information_integration, inputs=[ modelname, dataset, api_key, url, temperature, noise_rate, correct_rate, passage_num, factchecking, max_instances ], outputs=[output] ) run_fact_button.click( run_counter_factual_checking, inputs=[ modelname, dataset, api_key, url, temperature, noise_rate, correct_rate, passage_num, factchecking, max_instances ], outputs=[output] ) demo.launch()