import pandas as pd
# Load the full ELI5 QA training set.
qa_df = pd.read_parquet('./content/eli5_data/qa_train.parquet')
print(f"原始数据总共有 {len(qa_df)} 条记录")
# Take a small, reproducible sample so the pipeline can be tuned cheaply first.
# NOTE(review): the code samples 20 rows, not 50 as the original comment claimed.
sample_qa_df = qa_df.sample(20, random_state=42)  # random_state pins the selection for reproducibility
# NOTE(review): reset_index() without drop=True keeps the original row index as
# an extra 'index' column in the written parquet — confirm downstream readers
# expect that column, otherwise use reset_index(drop=True).
sample_qa_df = sample_qa_df.reset_index()
sample_qa_df.to_parquet('./content/eli5_data/qa_sample.parquet')


# Check for (and drop) rows whose 'query' column is empty — currently disabled.
# print(f"query 列空值数量: {sample_qa_df['query'].isna().sum()}")
import os  # NOTE(review): only used by the commented-out makedirs call below
# os.makedirs('./content/project_dir')
import nest_asyncio
# Patch the event loop so asyncio code can run inside an already-running loop
# (needed when this script runs in Jupyter/Colab-like environments).
nest_asyncio.apply()
#
# from transformers import AutoTokenizer
#
# # This downloads and caches the tokenizer.
# tokenizer = AutoTokenizer.from_pretrained("gpt2")

print("ok")

from autorag.evaluator import Evaluator

# Input/output locations for the evaluation trial.
_QA_PATH = './content/eli5_data/qa_sample.parquet'
_CORPUS_PATH = './content/eli5_data/corpus.parquet'
_PROJECT_DIR = './content/project_dir'

# Build the evaluator over the sampled QA set and corpus, then launch a trial
# with the pipeline config; validation is skipped to shorten the run.
evaluator = Evaluator(
    qa_data_path=_QA_PATH,
    corpus_data_path=_CORPUS_PATH,
    project_dir=_PROJECT_DIR,
)
evaluator.start_trial(yaml_path=r'./content/config.yaml', skip_validation=True)

