# NOTE(review): removed stray UI text ("Spaces:" / "Running") that leaked in
# from a copy-paste — it was not valid Python and not part of this script.
#!/usr/bin/python3 | |
# -*- coding: utf-8 -*- | |
import argparse | |
from datetime import datetime | |
import json | |
import os | |
from pathlib import Path | |
import sys | |
import time | |
from zoneinfo import ZoneInfo  # stdlib since Python 3.9; no extra install needed
# Make the parent directory importable so `project_settings` below resolves
# when this script is run directly (not as an installed package).
pwd = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(pwd, "../"))
import openai | |
from openai import AzureOpenAI | |
from project_settings import environment, project_path | |
def get_args():
    """Parse command-line options for an Azure OpenAI choice-eval run.

    Example:
        python3 azure_openai.py --model_name gpt-4o-mini \\
            --eval_dataset_name arc-easy-1000-choice.jsonl \\
            --client "us_west(47.88.76.239)" \\
            --create_time_str 20250723_111000 \\
            --interval 10

    Returns:
        argparse.Namespace with model/dataset/output/client/service settings,
        the run timestamp override, and the per-request sleep interval.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", default="gpt-4o-mini", type=str)
    parser.add_argument("--eval_dataset_name", default="agent-bingoplus-ph-90-choice.jsonl", type=str)
    parser.add_argument("--eval_dataset_dir", default=(project_path / "data/dataset").as_posix(), type=str)
    parser.add_argument("--eval_data_dir", default=(project_path / "data/eval_data").as_posix(), type=str)
    parser.add_argument("--client", default="shenzhen_sase", type=str)
    parser.add_argument("--service", default="west_us_chatgpt_openai_azure_com", type=str)
    # "null" means: let main() stamp the run with the current time.
    parser.add_argument("--create_time_str", default="null", type=str)
    parser.add_argument("--interval", default=5, type=int)
    return parser.parse_args()
def main():
    """Evaluate a chat model on a multiple-choice dataset via Azure OpenAI.

    Reads one JSON object per line from the eval dataset (fields: idx, prompt,
    response), asks the model for a single-token answer, compares it with the
    reference response, and appends per-sample results with a running accuracy
    to a resumable JSONL output file.

    The output path encodes model/client/service/timestamp so separate runs do
    not collide; an existing file is used to resume an interrupted run.
    """
    args = get_args()

    eval_dataset_dir = Path(args.eval_dataset_dir)
    eval_dataset_dir.mkdir(parents=True, exist_ok=True)
    eval_data_dir = Path(args.eval_data_dir)
    eval_data_dir.mkdir(parents=True, exist_ok=True)

    # "null" starts a fresh run stamped with the current Asia/Shanghai time;
    # passing an explicit timestamp resumes that run's output file.
    if args.create_time_str == "null":
        tz = ZoneInfo("Asia/Shanghai")
        now = datetime.now(tz)
        create_time_str = now.strftime("%Y%m%d_%H%M%S")
    else:
        create_time_str = args.create_time_str

    eval_dataset = eval_dataset_dir / args.eval_dataset_name
    output_file = eval_data_dir / f"azure_openai/azure/{args.model_name}/{args.client}/{args.service}/{create_time_str}/{args.eval_dataset_name}"
    output_file.parent.mkdir(parents=True, exist_ok=True)

    # Credentials (api_key, api_version, azure_endpoint) come from the
    # environment config keyed by --service. A real API key used to sit here
    # in commented-out code — removed; secrets must never be committed, and
    # the leaked key should be rotated.
    service_params = environment.get(args.service, dtype=json.loads)
    client = AzureOpenAI(**service_params)

    total = 0
    total_correct = 0

    # Resume support: collect already-finished sample indices and pick up the
    # running counters from the last line of a previous (interrupted) run.
    finished_idx_set = set()
    if os.path.exists(output_file.as_posix()):
        with open(output_file.as_posix(), "r", encoding="utf-8") as f:
            for row in f:
                row = json.loads(row)
                idx = row["idx"]
                total = row["total"]
                total_correct = row["total_correct"]
                finished_idx_set.add(idx)
    print(f"finished count: {len(finished_idx_set)}")

    with open(eval_dataset.as_posix(), "r", encoding="utf-8") as fin, \
            open(output_file.as_posix(), "a+", encoding="utf-8") as fout:
        for row in fin:
            row = json.loads(row)
            idx = row["idx"]
            prompt = row["prompt"]
            response = row["response"]
            if idx in finished_idx_set:
                continue
            finished_idx_set.add(idx)

            try:
                # Crude client-side rate limiting between requests.
                time.sleep(args.interval)
                print(f"sleep: {args.interval}")
                time_begin = time.time()
                llm_response = client.chat.completions.create(
                    model=args.model_name,
                    messages=[{"role": "user", "content": prompt}],
                    stream=False,
                    # One token suffices: the answer is a single choice letter.
                    max_tokens=1,
                    top_p=0.95,
                    temperature=0.6,
                    # Bias sampling hard toward the candidate answer tokens.
                    # NOTE(review): assumes token ids 32-39 map to the choice
                    # letters in this model's tokenizer — confirm per model.
                    logit_bias={
                        32: 100,
                        33: 100,
                        34: 100,
                        35: 100,
                        36: 100,
                        37: 100,
                        38: 100,
                        39: 100,
                    },
                )
                time_cost = time.time() - time_begin
                print(f"time_cost: {time_cost}")
            except openai.BadRequestError as e:
                # Best-effort: skip samples the service rejects (e.g. content
                # filter) instead of aborting the whole run.
                print(f"request failed, error type: {type(e)}, error text: {str(e)}")
                continue

            prediction = llm_response.choices[0].message.content
            correct = 1 if prediction == response else 0
            total += 1
            total_correct += correct
            score = total_correct / total
            row_ = {
                "idx": idx,
                "prompt": prompt,
                "response": response,
                "prediction": prediction,
                "correct": correct,
                "total": total,
                "total_correct": total_correct,
                "score": score,
                "time_cost": time_cost,
            }
            row_ = json.dumps(row_, ensure_ascii=False)
            # Flush per sample so a crash loses at most the in-flight request.
            fout.write(f"{row_}\n")
            fout.flush()
    return
# Script entry point: only run the evaluation when executed directly.
if __name__ == "__main__":
    main()