|
# File: docmatix-main/analysis/count_words_in_dataset.py |
|
from collections import Counter |
|
import string |
|
|
|
def count_words(df, column_name):
    """Count words per row of `df[column_name]` and tally corpus-wide word frequencies.

    Returns the DataFrame with a new 'word_count' column and the 100 most common words.
    """
    overall_counter = Counter()
    word_counts = []
    for text in df[column_name]:
        # Replace every punctuation character with a space before splitting into words.
        text = text.translate(str.maketrans(string.punctuation, ' ' * len(string.punctuation)))
|
words = text.lower().split() |
|
word_count = len(words) |
|
word_counts.append(word_count) |
|
overall_counter.update(words) |
|
df['word_count'] = word_counts |
|
most_common_words = overall_counter.most_common(100) |
|
return (df, most_common_words) |
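
# Example usage (a minimal sketch; the DataFrame below is hypothetical):
#   import pandas as pd
#   df = pd.DataFrame({'answer': ['Hello, world!', 'Hello again.']})
#   df, top_words = count_words(df, 'answer')
#   df['word_count'].tolist()  # [2, 2]
#   top_words[0]               # ('hello', 2)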
|
|
|
# File: docmatix-main/analysis/plot.py |
|
import matplotlib.pyplot as plt |
|
import pandas as pd |
|
import seaborn as sns |
|
analysis_df = pd.read_json('prompt_analysis_results.json', orient='records', lines=True) |
|
sns.set(style='whitegrid') |
|
plt.figure(figsize=(16, 12))

# One subplot per metric: (DataFrame column, axis label, bar-annotation formatter).
PLOT_SPECS = [
    ('Number of Q/A pairs', 'Number of Q/A pairs', lambda v: f'{v / 1e6:.2f}e6'),
    ('Average answer length', 'Average Answer Length', lambda v: f'{v:.2f}'),
    ('Diversity within documents', 'Diversity within Documents', lambda v: f'{v:.2f}'),
    ('Total empty questions', 'Total Empty Questions', lambda v: f'{v}'),
    ('Average Q/A pairs per page', 'Average Q/A pairs per Page', lambda v: f'{v:.2f}'),
    ('Number of unique questions', 'Number of unique questions', lambda v: f'{v / 1e6:.2f}e6'),
]

for (plot_idx, (column, label, fmt)) in enumerate(PLOT_SPECS, start=1):
    plt.subplot(3, 2, plot_idx)
    sns.barplot(x='Prompt ID', y=column, data=analysis_df, palette='viridis')
    plt.title(f'{label} per Prompt ID')
    plt.xlabel('Prompt ID')
    plt.ylabel(label)
    # Annotate each bar with its formatted value.
    for (i, row) in analysis_df.iterrows():
        plt.text(i, row[column], fmt(row[column]), ha='center', va='bottom')
|
plt.tight_layout() |
|
plt.savefig('prompt_analysis_plots_enhanced.png') |
|
plt.show() |
|
report = f"""
Prompt Analysis Report
=======================
Number of Q/A pairs per Prompt ID:
{analysis_df[['Prompt ID', 'Number of Q/A pairs']]}

Average answer length per Prompt ID:
{analysis_df[['Prompt ID', 'Average answer length']]}

Unique questions per Prompt ID:
{analysis_df[['Prompt ID', 'Number of unique questions']]}

Total pages per Prompt ID:
{analysis_df[['Prompt ID', 'Total pages']]}

Average Q/A pairs per page per Prompt ID:
{analysis_df[['Prompt ID', 'Average Q/A pairs per page']]}

Average answer length per page per Prompt ID:
{analysis_df[['Prompt ID', 'Average answer length per page']]}

Diversity within documents per Prompt ID:
{analysis_df[['Prompt ID', 'Diversity within documents']]}

Total empty questions per Prompt ID:
{analysis_df[['Prompt ID', 'Total empty questions']]}

"""
|
with open('prompt_analysis_report.txt', 'w') as f: |
|
f.write(report) |
|
print('Report and plots generated successfully.') |
|
|
|
# File: docmatix-main/clean_and_create/load_data.py |
|
import os |
|
import re |
|
import io |
|
from io import BytesIO |
|
import pandas as pd |
|
import datasets |
|
from pdf2image import convert_from_bytes |
|
from tqdm import tqdm |
|
from concurrent.futures import ThreadPoolExecutor |
|
import argparse |
|
import fitz |
|
import PIL.Image |
|
tqdm.pandas(desc='Pandas apply progress') |
|
fitz.TOOLS.mupdf_display_errors(False) |
|
DATA_PATH = '/fsx/andi/pdfa_data/' |
|
TAR_FILE_PATTERN = 'pdfa-eng-train-{:06d}.tar' |
|
|
|
def resize_large_images(image, max_image_size=2940):
    """Downscale `image` so its longest side is at most `max_image_size`, keeping the aspect ratio."""
|
(width, height) = image.size |
|
aspect_ratio = width / height |
|
resized = False |
|
if width >= height and width > max_image_size: |
|
width = max_image_size |
|
height = int(width / aspect_ratio) |
|
resized = True |
|
elif height > width and height > max_image_size: |
|
height = max_image_size |
|
width = int(height * aspect_ratio) |
|
resized = True |
|
if resized: |
|
image = image.resize((width, height), PIL.Image.LANCZOS) |
|
return image |
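
# Example (a minimal sketch with a synthetic image):
#   img = PIL.Image.new('RGB', (6000, 3000))
#   resize_large_images(img).size  # (2940, 1470): longest side capped, aspect ratio kept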
|
|
|
def _decode_pdf_pages(sample): |
|
try: |
|
image_fmt = 'L' |
|
with io.BytesIO(sample) as b: |
|
doc = fitz.Document(stream=b) |
|
num_image_pages = doc.page_count |
|
decoded_image_pages = [] |
|
for page_index in range(num_image_pages): |
|
page = doc.load_page(page_index) |
|
pixmap = page.get_pixmap(dpi=150) |
|
page_image = PIL.Image.frombuffer('RGB', (pixmap.width, pixmap.height), pixmap.samples) |
|
page_image = resize_large_images(page_image.convert(image_fmt)) |
|
                decoded_image_pages.append(page_image)
|
return decoded_image_pages |
|
except Exception as e: |
|
print(f'Error decoding pdf pages: {e}') |
|
return None |
|
|
|
def convert_img_to_png_bytes(img): |
|
with BytesIO() as buffer: |
|
img.save(buffer, format='PNG') |
|
return buffer.getvalue() |
|
|
|
def process_images(pdf_bytes): |
|
images = convert_from_bytes(pdf_bytes, dpi=150) |
|
return [convert_img_to_png_bytes(resize_large_images(img)) for img in images] |
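
# Example (a minimal sketch; 'sample.pdf' is a hypothetical path):
#   with open('sample.pdf', 'rb') as fh:
#       png_pages = process_images(fh.read())
#   len(png_pages)  # number of pages, each rendered to PNG bytes at 150 dpi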
|
|
|
def is_valid_question_or_answer(text):
    """Heuristic filter: reject empty strings and texts showing artifacts of bad parsing or prompting."""
    if not text or text.strip() == '':
        return False
    # Reject template braces/brackets/tags, IP-like numbers, dotted tokens (URLs, filenames),
    # blank-line runs, explicit 'unanswerable' markers, and leftover 'Q#: '/'A#: ' prefixes.
    patterns = ['\\{.*?\\}', '\\[.*?\\]', '<.*?>', '\\b\\d{1,3}(\\.\\d{1,3}){3}\\b', '\\w+\\.\\w+', '\\n\\s*\\n', 'unanswerable', 'Q\\d+: ', 'A\\d+: ']
    return not any((re.search(pattern, text, re.IGNORECASE) for pattern in patterns))
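
# Example (a minimal sketch):
#   is_valid_question_or_answer('What is the total revenue?')  # True
#   is_valid_question_or_answer('Q1: What is shown?')          # False (leftover Q# prefix)
#   is_valid_question_or_answer('unanswerable')                # False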
|
|
|
def process_group(key_group):
    """Convert one (__key__, group) of Q/A rows into a sample dict, dropping invalid pairs."""
|
try: |
|
(key, group) = key_group |
|
qa_pairs = [] |
|
for (_, row) in group.iterrows(): |
|
question = re.sub('^Q\\d+: ', '', row['question']) |
|
answer = re.sub('^A\\d+: ', '', row['answer']) |
|
if is_valid_question_or_answer(question) and is_valid_question_or_answer(answer): |
|
qa_pairs.append({'user': question, 'assistant': answer, 'source': 'PDFA key: ' + str(row['__key__'])}) |
|
if qa_pairs: |
|
return {'texts': qa_pairs, 'images': group['pdf'].iloc[0]} |
|
except Exception as e: |
|
print(f'Error processing group {key}: {e}') |
|
return None |
|
|
|
def process_tar_index(tar_index, step_size, question_answer_df):
    """Load `step_size` consecutive tar shards starting at `tar_index`, attach rendered page
    images and the matching Q/A pairs, and save the resulting dataset shard to disk."""
|
shard_nr = tar_index // step_size |
|
loaded_datasets = [] |
|
for inner_idx in range(step_size): |
|
tar_file = os.path.join(DATA_PATH, TAR_FILE_PATTERN.format(tar_index + inner_idx)) |
|
try: |
|
print(f'Loading dataset from: {tar_file}') |
|
hf_dataset = datasets.load_dataset('webdataset', split='train', data_files=tar_file, cache_dir='/fsx/.cache').to_pandas() |
|
            hf_dataset['__key__'] = hf_dataset['__key__'].apply(pd.to_numeric)
|
loaded_datasets.append(hf_dataset) |
|
except Exception as e: |
|
print(f'Error loading dataset from: {tar_file}') |
|
print(e) |
|
hf_dataset = pd.concat(loaded_datasets, ignore_index=True) |
|
print(f'Concatenated datasets with {len(hf_dataset)} samples') |
|
hf_dataset = hf_dataset[hf_dataset['__key__'].isin(question_answer_df['__key__'].unique())] |
|
|
if os.path.exists(f'/fsx/m4/datasets/large_docvqa/shard_{shard_nr}'): |
|
print('using saved data') |
|
df_data = datasets.load_from_disk(f'/fsx/m4/datasets/large_docvqa/shard_{shard_nr}').to_pandas() |
|
        # 'source' has the form 'PDFA key: <key>'; strip the prefix to recover the numeric key.
        df_data['__key__'] = df_data.texts.apply(lambda x: x[0]['source'][len('PDFA key: '):])
|
df_data['__key__'] = df_data['__key__'].apply(pd.to_numeric) |
|
df_data.drop(columns=['texts'], inplace=True) |
|
hf_dataset = hf_dataset[hf_dataset['__key__'].isin(df_data['__key__'].unique())] |
|
hf_dataset = pd.merge(hf_dataset, df_data, on='__key__', how='inner') |
|
hf_dataset['pdf'] = hf_dataset['images'] |
|
hf_dataset.drop(columns=['images'], inplace=True) |
|
del df_data |
|
else: |
|
        hf_dataset['pdf'] = hf_dataset['pdf'].progress_apply(process_images)
|
hf_dataset = hf_dataset[~hf_dataset['pdf'].isnull()] |
|
merged_df = pd.merge(hf_dataset, question_answer_df, on='__key__', how='inner') |
|
data_extracted = [] |
|
max_threads = 10 |
|
with ThreadPoolExecutor(max_threads) as executor: |
|
results = list(tqdm(executor.map(process_group, merged_df.groupby('__key__')), desc='Extracting data', total=len(merged_df['__key__'].unique()))) |
|
data_extracted.extend(results) |
|
data_extracted = list(filter(lambda item: item is not None, data_extracted)) |
|
FEATURES = datasets.Features({'images': datasets.Sequence(datasets.Image(decode=True)), 'texts': [{'user': datasets.Value('string'), 'assistant': datasets.Value('string'), 'source': datasets.Value('string')}]}) |
|
|
|
def data_generator(): |
|
for data_dict in data_extracted: |
|
yield data_dict |
|
ds_shard = datasets.Dataset.from_generator(data_generator, features=FEATURES, writer_batch_size=100, cache_dir='/fsx/.cache') |
|
ds_shard.save_to_disk(f'/fsx/m4/datasets/docvqa_instruct/shard_{shard_nr}') |
|
|
|
def load_and_concatenate_dataframes():
    """Concatenate all synthetic_dataset_batch_*.h5 files, caching the result as parquet."""
|
if os.path.exists('concatenated_synthetic_dataset.parquet.gzip'): |
|
return pd.read_parquet('concatenated_synthetic_dataset.parquet.gzip') |
|
directory = '.' |
|
all_files = os.listdir(directory) |
|
h5_files = sorted([f for f in all_files if re.match('synthetic_dataset_batch_\\d+\\.h5$', f)]) |
|
dataframes = [] |
|
for file in tqdm(h5_files, desc='Loading data'): |
|
file_path = os.path.join(directory, file) |
|
df = pd.read_hdf(file_path) |
|
if '__key__' not in df.columns: |
|
raise ValueError(f'Key column not found in {file_path}') |
|
        df['__key__'] = df['__key__'].apply(pd.to_numeric)
|
dataframes.append(df) |
|
concatenated_df = pd.concat(dataframes, ignore_index=True) |
|
concatenated_df.to_parquet('concatenated_synthetic_dataset.parquet.gzip', compression='gzip') |
|
return concatenated_df |
|
if __name__ == '__main__': |
|
parser = argparse.ArgumentParser(description='Process .h5 files and tar indices.') |
|
parser.add_argument('--start_index', type=int, default=0, help='The starting index for tar processing.') |
|
parser.add_argument('--step_size', type=int, default=1, help='The step size for tar processing.') |
|
args = parser.parse_args() |
|
question_answer_df = load_and_concatenate_dataframes() |
|
print(len(question_answer_df)) |
|
process_tar_index(args.start_index, args.step_size, question_answer_df=question_answer_df) |
|
|
|
# File: docmatix-main/create_only_with_pdfs/load_data.py |
|
import os |
|
import re |
|
import pandas as pd |
|
import datasets |
|
from tqdm import tqdm |
|
from concurrent.futures import ThreadPoolExecutor |
|
import argparse |
|
tqdm.pandas(desc='Pandas apply progress') |
|
DATA_PATH = '/fsx/andi/pdfa_data/' |
|
TAR_FILE_PATTERN = 'pdfa-eng-train-{:06d}.tar' |
|
|
|
def is_valid_question_or_answer(text): |
|
if not text or text.strip() == '': |
|
return False |
|
patterns = ['\\{.*?\\}', '\\[.*?\\]', '<.*?>', '\\b\\d{1,3}(\\.\\d{1,3}){3}\\b', '\\w+\\.\\w+', '\\n\\s*\\n', 'unanswerable', 'Q\\d+: ', 'A\\d+: '] |
|
return not any((re.search(pattern, text, re.IGNORECASE) for pattern in patterns)) |
|
|
|
def process_group(key_group): |
|
try: |
|
(key, group) = key_group |
|
qa_pairs = [] |
|
for (_, row) in group.iterrows(): |
|
question = re.sub('^Q\\d+: ', '', row['question']) |
|
answer = re.sub('^A\\d+: ', '', row['answer']) |
|
if is_valid_question_or_answer(question) and is_valid_question_or_answer(answer): |
|
qa_pairs.append({'user': question, 'assistant': answer, 'source': 'PDFA key: ' + str(row['__key__'])}) |
|
if qa_pairs: |
|
return {'texts': qa_pairs, 'pdf': group['pdf'].iloc[0]} |
|
except Exception as e: |
|
print(f'Error processing group {key}: {e}') |
|
return None |
|
|
|
def process_tar_index(tar_index, step_size, question_answer_df): |
|
shard_nr = tar_index // step_size |
|
loaded_datasets = [] |
|
for inner_idx in range(step_size): |
|
tar_file = os.path.join(DATA_PATH, TAR_FILE_PATTERN.format(tar_index + inner_idx)) |
|
try: |
|
print(f'Loading dataset from: {tar_file}') |
|
hf_dataset = datasets.load_dataset('webdataset', split='train', data_files=tar_file, cache_dir='/fsx/.cache').to_pandas() |
|
            hf_dataset['__key__'] = hf_dataset['__key__'].apply(pd.to_numeric)
|
loaded_datasets.append(hf_dataset) |
|
except Exception as e: |
|
print(f'Error loading dataset from: {tar_file}') |
|
print(e) |
|
hf_dataset = pd.concat(loaded_datasets, ignore_index=True) |
|
print(f'Concatenated datasets with {len(hf_dataset)} samples') |
|
hf_dataset = hf_dataset[hf_dataset['__key__'].isin(question_answer_df['__key__'].unique())] |
|
merged_df = pd.merge(hf_dataset, question_answer_df, on='__key__', how='inner') |
|
data_extracted = [] |
|
max_threads = 10 |
|
with ThreadPoolExecutor(max_threads) as executor: |
|
results = list(tqdm(executor.map(process_group, merged_df.groupby('__key__')), desc='Extracting data', total=len(merged_df['__key__'].unique()))) |
|
data_extracted.extend(results) |
|
data_extracted = list(filter(lambda item: item is not None, data_extracted)) |
|
FEATURES = datasets.Features({'pdf': datasets.Value('binary'), 'texts': [{'user': datasets.Value('string'), 'assistant': datasets.Value('string'), 'source': datasets.Value('string')}]}) |
|
|
|
def data_generator(): |
|
for data_dict in data_extracted: |
|
yield data_dict |
|
ds_shard = datasets.Dataset.from_generator(data_generator, features=FEATURES, writer_batch_size=100, cache_dir='/fsx/.cache') |
|
ds_shard.save_to_disk(f'/fsx/m4/datasets/docmatix_pdf/shard_{shard_nr}') |
|
|
|
def load_and_concatenate_dataframes(): |
|
if os.path.exists('/fsx/andi/llm-swarm/concatenated_synthetic_dataset.parquet.gzip'): |
|
return pd.read_parquet('/fsx/andi/llm-swarm/concatenated_synthetic_dataset.parquet.gzip') |
|
directory = '.' |
|
all_files = os.listdir(directory) |
|
h5_files = sorted([f for f in all_files if re.match('synthetic_dataset_batch_\\d+\\.h5$', f)]) |
|
dataframes = [] |
|
for file in tqdm(h5_files, desc='Loading data'): |
|
file_path = os.path.join(directory, file) |
|
df = pd.read_hdf(file_path) |
|
if '__key__' not in df.columns: |
|
raise ValueError(f'Key column not found in {file_path}') |
|
        df['__key__'] = df['__key__'].apply(pd.to_numeric)
|
dataframes.append(df) |
|
concatenated_df = pd.concat(dataframes, ignore_index=True) |
|
concatenated_df.to_parquet('concatenated_synthetic_dataset.parquet.gzip', compression='gzip') |
|
return concatenated_df |
|
if __name__ == '__main__': |
|
parser = argparse.ArgumentParser(description='Process .h5 files and tar indices.') |
|
parser.add_argument('--start_index', type=int, default=0, help='The starting index for tar processing.') |
|
parser.add_argument('--step_size', type=int, default=1, help='The step size for tar processing.') |
|
args = parser.parse_args() |
|
question_answer_df = load_and_concatenate_dataframes() |
|
print(len(question_answer_df)) |
|
process_tar_index(args.start_index, args.step_size, question_answer_df=question_answer_df) |
|
|
|
# File: docmatix-main/create_only_with_pdfs/upload_data.py |
|
from datasets import load_from_disk, concatenate_datasets |
|
from tqdm import tqdm |
|
import os |
|
|
|
def get_datasets(): |
|
if os.path.isdir('/fsx/m4/datasets/docmatix_pdf/concatenated'): |
|
return load_from_disk('/fsx/m4/datasets/docmatix_pdf/concatenated') |
|
hf_datasets = [] |
|
for shard_nr in tqdm(range(200)): |
|
try: |
|
hf_datasets.append(load_from_disk(f'/fsx/m4/datasets/docmatix_pdf/shard_{shard_nr}')) |
|
except Exception as e: |
|
            print(f'Error loading dataset from shard: {shard_nr}')
|
print(e) |
|
hf_data = concatenate_datasets(hf_datasets) |
|
hf_data.save_to_disk('/fsx/m4/datasets/docmatix_pdf/concatenated') |
|
return hf_data |
|
data = get_datasets() |
|
print(data.features) |
|
print(data[0]['texts']) |
|
print(data[0]['pdf'][:10]) |
|
print(len(data)) |
|
data.push_to_hub('HuggingFaceM4/Docmatix', 'pdf') |
|
|
|
# File: docmatix-main/florence_2_dataset/create_florence_2_dataset.py |
|
from functools import partial |
|
from datasets import load_from_disk, concatenate_datasets |
|
from tqdm import tqdm |
|
import re |
|
import pandas as pd |
|
import os |
|
import datasets |
|
IMAGE_FEATURES = datasets.Features({'image': datasets.Image(decode=True), '__key__': datasets.Value('int64')}) |
|
TEXT_FEATURES = datasets.Features({'question': datasets.Value('string'), 'answer': datasets.Value('string'), '__key__': datasets.Value('int64')}) |
|
|
|
def text_generator(df_text): |
|
for (i, row) in df_text.iterrows(): |
|
print(i, row['__key__']) |
|
yield {'question': row['question'], 'answer': row['answer'], '__key__': row['__key__']} |
|
|
|
def img_generator(df_img): |
|
for (i, row) in df_img.iterrows(): |
|
print(i, row['__key__']) |
|
yield {'image': row['images'][0], '__key__': row['__key__']} |
|
pre_key_len = len('PDFA key: ') |
|
for shard_number in tqdm(range(0, 200)): |
|
try: |
|
if os.path.exists(f'/fsx/m4/datasets/florence_vqa_instruct/shard_{shard_number}') and os.path.exists(f'/fsx/m4/datasets/florence_vqa_instruct_images/shard_{shard_number}'): |
|
continue |
|
df_data = load_from_disk(f'/fsx/m4/datasets/docvqa_instruct/shard_{shard_number}').to_pandas() |
|
df_data['__key__'] = df_data.texts.apply(lambda x: x[0]['source'][pre_key_len:]) |
|
df_data['__key__'] = df_data['__key__'].apply(pd.to_numeric) |
|
df_images = df_data[['images', '__key__']].copy() |
|
df_images = df_images[df_images['images'].apply(len) <= 1] |
|
df_texts = df_data[['texts']].explode('texts') |
|
df_texts['question'] = df_texts['texts'].apply(lambda x: x.get('user')) |
|
df_texts['answer'] = df_texts['texts'].apply(lambda x: x.get('assistant')) |
|
df_texts['__key__'] = df_texts['texts'].apply(lambda x: x.get('source')[pre_key_len:]) |
|
df_texts['__key__'] = df_texts['__key__'].apply(pd.to_numeric) |
|
df_texts = df_texts[df_texts['__key__'].isin(df_images['__key__'].unique())] |
|
df_texts.drop(columns=['texts'], inplace=True) |
|
df_texts = df_texts[df_texts['question'].apply(lambda x: len(x.split()) <= 900)] |
|
df_texts = df_texts[df_texts['answer'].apply(lambda x: len(x.split()) <= 900)] |
|
df_images = df_images[df_images['__key__'].isin(df_texts['__key__'].unique())] |
|
ds_text = datasets.Dataset.from_generator(partial(text_generator, df_texts), features=TEXT_FEATURES, writer_batch_size=100, cache_dir='/fsx/.cache') |
|
ds_text.save_to_disk(f'/fsx/m4/datasets/florence_vqa_instruct/shard_{shard_number}') |
|
df_image = datasets.Dataset.from_generator(partial(img_generator, df_images), features=IMAGE_FEATURES, writer_batch_size=100, cache_dir='/fsx/.cache') |
|
df_image.save_to_disk(f'/fsx/m4/datasets/florence_vqa_instruct_images/shard_{shard_number}') |
|
print(f'Finished processing shard: {shard_number}') |
|
    except Exception as e:
        print(f'shard {shard_number} failed: {e}')
|
all_ds = [] |
|
for shard in tqdm(range(0, 200)): |
|
try: |
|
data = load_from_disk(f'/fsx/m4/datasets/florence_vqa_instruct/shard_{shard}') |
|
all_ds.append(data) |
|
    except Exception as e:
        print(f'shard {shard} failed: {e}')
|
all_ds = concatenate_datasets(all_ds) |
|
all_ds.save_to_disk('/fsx/m4/datasets/complete_florence_vqa_instruct', num_proc=96) |
|
|
|
# File: docmatix-main/generation/base_prompts.py |
|
BASE_PROMPT = '\nYou are reading text extracted from a PDF with several pages. The pages are divided by a line saying \'NEW PAGE\'. \nYour role is to {role_description}. If the type of questions requested is impossible to generate due to the simplicity of the document, default to simpler factual questions.\nThe PDFs might contain tables or images that are poorly parsed in the text. Avoid asking questions about these.\nIf the text seems to only contain uninteresting information, output "unanswerable" as the answer.\nHere are some examples of questions that follow your role:\n{examples}\n'
|
BASE_USER_CONTENT = 'The text contained in the PDF is: \n{text} \n\nCreate the question answer pairs following this format:\nQ#: \nA#:\n\nIf you can\'t generate questions for the text, write "unanswerable" as the answer.\n'
|
PROMPTS = [{'role_description': 'understand the content of the PDF and create as many pairs of questions and answers as you need to cover the content of the PDF comprehensively. The questions should be varied, covering factual information, inferences, and deeper analysis of the text.', 'examples': '\n Q1: What is the main topic of the document?\n A1: The main topic of the document is...\n \n Q2: What are the key points discussed in the first section?\n A2: The key points discussed in the first section include...\n\n Q3: How does the author support their argument about X?\n A3: The author supports their argument about X by...\n\n Q4: What can be inferred about Y from the document?\n A4: From the document, it can be inferred that Y...\n\n Q5: What are the implications of Z mentioned in the document?\n A5: The implications of Z mentioned in the document are...\n '}, {'role_description': 'focus on generating enough pairs of questions and answers for each section of the document to ensure a detailed and complete coverage of the document.', 'examples': '\n Q1: What is the primary focus of the first section?\n A1: The primary focus of the first section is...\n\n Q2: What are the significant details mentioned in the second section?\n A2: The significant details mentioned in the second section include...\n\n Q3: How does the information in the third section relate to the overall topic of the document?\n A3: The information in the third section relates to the overall topic by...\n '}, {'role_description': 'understand the content of the PDF and create as many pairs of questions and answers as you need to cover the content of the PDF comprehensively. The questions should require critical thinking and analysis.', 'examples': '\n Q1: What arguments does the author present in support of their thesis?\n A1: The arguments presented by the author in support of their thesis include...\n\n Q2: How does the author compare X and Y in the text?\n A2: The author compares X and Y by...\n\n Q3: What are the potential implications of the findings discussed in the document?\n A3: The potential implications of the findings are...\n '}, {'role_description': 'create as many pairs of questions and answers as you need to cover both summaries of sections and specific details. Ensure coverage of broad themes and granular information.', 'examples': '\n Q1: What is the summary of the first section?\n A1: The summary of the first section is...\n\n Q2: What specific data or evidence is provided in the second section?\n A2: The specific data or evidence provided in the second section includes...\n\n Q3: How do the details in the third section support the main argument of the document?\n A3: The details in the third section support the main argument by...\n '}, {'role_description': 'understand the content of the PDF and create as many pairs of questions and answers as you need to cover the content of the PDF comprehensively. The questions should be varied, covering factual information, inferences, and deeper analysis of the text. The questions should be asked in a general manner without introducing details from the document itself.', 'examples': '\n Q1: What is the summary of the first section?\n A1: The first section, called xxx, can be summarized as...\n\n Q2: What specific data or evidence is provided in the second section?\n A2: In the section called xxx, much data and evidence is presented, such as...\n\n Q3: How do the details in the third section support the main argument of the document?\n A3: The details in the section on "xxx" support the main argument by...\n '}]
|
|
|
def create_prompts(text):
    """Build one chat-message list (system + user) per prompt template in PROMPTS."""
|
prompts = [] |
|
for prompt in PROMPTS: |
|
system_content = BASE_PROMPT.format(role_description=prompt['role_description'], examples=prompt['examples']) |
|
prompts.append([{'role': 'system', 'content': system_content}, {'role': 'user', 'content': BASE_USER_CONTENT.format(text=text)}]) |
|
return prompts |
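
# Example (a minimal sketch):
#   prompts = create_prompts('First page text \nNEW PAGE\n Second page text')
#   len(prompts)           # 5, one message list per entry in PROMPTS
#   prompts[0][0]['role']  # 'system'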
|
|
|
# File: docmatix-main/generation/llm_swarm_script.py |
|
import asyncio |
|
import json |
|
import os |
|
import random |
|
import re |
|
from concurrent.futures import ThreadPoolExecutor |
|
from typing import Any, Dict, List, Optional |
|
import pandas as pd |
|
from datasets import load_dataset
|
from huggingface_hub import AsyncInferenceClient |
|
from tqdm import trange |
|
from tqdm.asyncio import tqdm_asyncio |
|
from transformers import AutoTokenizer |
|
# The prompt templates live alongside this script in generation/base_prompts.py.
from base_prompts import BASE_PROMPT, BASE_USER_CONTENT, PROMPTS
|
from llm_swarm import LLMSwarm, LLMSwarmConfig |
|
CHECKPOINT_FILE = 'checkpoint.json' |
|
DATA_PATH = '/fsx/andi/pdfa_data/' |
|
TAR_FILE_PATTERN = 'pdfa-eng-train-{:06d}.tar' |
|
NUM_TAR_FILES = 1800 |
|
MAX_PAGES_PER_PDF = 4 |
|
STEP_SIZE = 10 |
|
model_id = 'microsoft/Phi-3-small-8k-instruct' |
|
|
|
def create_llm_prompt(prompt, text): |
|
system_content = BASE_PROMPT.format(role_description=prompt['role_description'], examples=prompt['examples']) |
|
return [{'role': 'system', 'content': system_content}, {'role': 'user', 'content': BASE_USER_CONTENT.format(text=text)}] |
|
|
|
def extract_text_per_page_from_sample(sample: Dict[str, Any]) -> List[str]: |
|
texts = [] |
|
for page in sample['json']['pages']: |
|
pages_text = ' \n '.join(page['lines']['text']) |
|
texts.append(pages_text) |
|
return texts |
|
|
|
def extract_chunks(pages: List[Any], max_tokens_per_group: int, max_pages_per_group: int, n_overlap: int) -> List[str]:
    """Group consecutive pages into 'NEW PAGE'-joined chunks bounded by token and page budgets,
    carrying `n_overlap` pages of context over between neighbouring chunks."""
|
chunks = [] |
|
current_chunk = [] |
|
current_chunk_tokens = 0 |
|
current_chunk_pages = 0 |
|
page_token_counts = [len(tokenizer.encode(page, add_special_tokens=False)) for page in pages] |
|
for (i, page) in enumerate(pages): |
|
page_tokens = page_token_counts[i] |
|
if page_tokens > max_tokens_per_group: |
|
print(f'Skipping document where page nr {i} has {page_tokens} tokens.') |
|
return [] |
|
if current_chunk_tokens + page_tokens > max_tokens_per_group or current_chunk_pages + 1 > max_pages_per_group: |
|
if current_chunk: |
|
chunks.append('\nNEW PAGE\n'.join(current_chunk)) |
|
current_chunk = current_chunk[-n_overlap:] if n_overlap > 0 else [] |
|
current_chunk_tokens = sum(page_token_counts[max(0, i - n_overlap):i]) |
|
current_chunk_pages = len(current_chunk) |
|
current_chunk.append(page) |
|
current_chunk_tokens += page_tokens |
|
current_chunk_pages += 1 |
|
if current_chunk: |
|
chunks.append('\nNEW PAGE\n'.join(current_chunk)) |
|
return chunks |
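
# Example (a minimal sketch; assumes `tokenizer` has been initialised, see below):
#   chunks = extract_chunks(['page one', 'page two', 'page three'], max_tokens_per_group=5000, max_pages_per_group=2, n_overlap=1)
#   Each chunk joins consecutive pages with '\nNEW PAGE\n', and each new chunk
#   re-uses the last `n_overlap` pages of the previous one for context.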
|
|
|
def create_tasks(dataset: pd.DataFrame, prompt_id: Optional[int]=None, n_overlap: int=2) -> List[Dict[str, Any]]:
|
if prompt_id is not None: |
|
selected_id_prompt = prompt_id |
|
tasks = [] |
|
for (index, sample) in dataset.iterrows(): |
|
text_per_page = extract_text_per_page_from_sample(sample) |
|
if len(text_per_page) > MAX_PAGES_PER_PDF: |
|
continue |
|
page_chunks = extract_chunks(text_per_page, max_tokens_per_group=5000, max_pages_per_group=5, n_overlap=n_overlap) |
|
for chunk in page_chunks: |
|
if prompt_id is None: |
|
selected_id_prompt = random.randint(0, 4) |
|
            prompt = PROMPTS[selected_id_prompt]
            messages = create_llm_prompt(prompt, chunk)
            templated_prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
            tasks_dict = {'__key__': sample['__key__'], 'Page count': len(text_per_page), 'messages': templated_prompt, 'Prompt ID': selected_id_prompt}
|
tasks.append(tasks_dict) |
|
return tasks |
|
|
|
def extract_qa_pairs(text): |
|
qa_pattern = re.compile('(Q\\d+:\\s*.*?)(A\\d+:\\s*.*?)(?=(Q\\d+:)|$)', re.DOTALL) |
|
matches = qa_pattern.findall(text) |
|
    qa_pairs = [(match[0].strip(), match[1].strip()) for match in matches]
|
return qa_pairs |
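
# Example (a minimal sketch):
#   extract_qa_pairs('Q1: What is shown? A1: A table. Q2: How many rows? A2: Ten.')
#   -> [('Q1: What is shown?', 'A1: A table.'), ('Q2: How many rows?', 'A2: Ten.')]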
|
|
|
def process_outputs_to_df(df): |
|
all_data = [] |
|
for (index, row) in df.iterrows(): |
|
task = row['Task'] |
|
completion = row['Completion'] |
|
sample_key = task['__key__'] |
|
page_count = task['Page count'] |
|
prompt_id = task['Prompt ID'] |
|
qa_pairs = extract_qa_pairs(completion) |
|
if len(qa_pairs) == 0: |
|
print('No Q&A pairs found for sample:', sample_key) |
|
for (question, answer) in qa_pairs: |
|
all_data.append({'__key__': sample_key, 'Page count': page_count, 'Prompt ID': prompt_id, 'question': question, 'answer': answer}) |
|
qa_df = pd.DataFrame(all_data) |
|
return qa_df |
|
|
|
def save_checkpoint(tar_index, total_examples): |
|
checkpoint_data = {'tar_index': tar_index, 'total_examples': total_examples} |
|
with open(CHECKPOINT_FILE, 'w') as f: |
|
json.dump(checkpoint_data, f) |
|
|
|
def load_checkpoint(): |
|
if os.path.exists(CHECKPOINT_FILE): |
|
with open(CHECKPOINT_FILE, 'r') as f: |
|
return json.load(f) |
|
return {'tar_index': 0, 'total_examples': 0} |
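
# The checkpoint file written by save_checkpoint has this shape (values illustrative):
#   {"tar_index": 120, "total_examples": 45231}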
|
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True) |
|
|
|
def launch(): |
|
    swarm_config = LLMSwarmConfig(
        instances=8,
        inference_engine='vllm',
        gpus=1,
        model=model_id,
        slurm_template_path='templates/vllm_h100.template.slurm',
        load_balancer_template_path='templates/nginx.template.conf',
        trust_remote_code=True,
        per_instance_max_parallel_requests=200,
    )
    with LLMSwarm(swarm_config) as llm_swarm:
|
semaphore = asyncio.Semaphore(llm_swarm.suggested_max_parallel_requests) |
|
client = AsyncInferenceClient(model=llm_swarm.endpoint) |
|
|
|
async def process_text(prompt): |
|
async with semaphore: |
|
response = await client.post(json={'prompt': prompt, 'max_tokens': 2000}) |
|
res = json.loads(response.decode('utf-8'))['text'][0][len(prompt):] |
|
return res |
|
|
|
def load_and_process_dataset(tar_file): |
|
try: |
|
print(f'Loading dataset from: {tar_file}') |
|
dataset = load_dataset('webdataset', split='train', data_files=tar_file).to_pandas() |
|
tasks = create_tasks(dataset, prompt_id=None, n_overlap=1) |
|
return tasks |
|
except Exception as e: |
|
print(f'Error loading dataset from: {tar_file}') |
|
print(e) |
|
return [] |
|
|
|
def get_future_tasks(tar_index, executor): |
|
futures = [] |
|
for inner_idx in range(STEP_SIZE): |
|
tar_file = os.path.join(DATA_PATH, TAR_FILE_PATTERN.format(tar_index + inner_idx)) |
|
futures.append(executor.submit(load_and_process_dataset, tar_file)) |
|
return futures |
|
|
|
async def process_dataset(tar_index, total_examples): |
|
next_future_tasks = get_future_tasks(tar_index, ThreadPoolExecutor(max_workers=STEP_SIZE)) |
|
for idx in trange(tar_index, NUM_TAR_FILES + STEP_SIZE, STEP_SIZE, desc='Creating Dataset'): |
|
print(f'Processing tar file {idx}') |
|
tasks = [] |
|
future_tasks = next_future_tasks |
|
results = [f.result() for f in future_tasks] |
|
for result in results: |
|
tasks.extend(result) |
|
next_future_tasks = get_future_tasks(idx + STEP_SIZE, ThreadPoolExecutor(max_workers=1)) |
|
results = await tqdm_asyncio.gather(*(process_text(task['messages']) for task in tasks)) |
|
df = pd.DataFrame({'Task': tasks, 'Completion': results}) |
|
df_new = process_outputs_to_df(df) |
|
df_new.to_hdf(f'synthetic_dataset_batch_{idx}.h5', key='df', mode='w') |
|
unique_keys = df_new['__key__'].nunique() |
|
total_examples += unique_keys |
|
save_checkpoint(idx, total_examples) |
|
|
|
async def main(): |
|
checkpoint = load_checkpoint() |
|
tar_index = checkpoint['tar_index'] |
|
if tar_index != 0: |
|
tar_index += STEP_SIZE |
|
print(f'Resuming from tar file {tar_index}') |
|
total_examples = checkpoint['total_examples'] |
|
processor = asyncio.create_task(process_dataset(tar_index, total_examples)) |
|
await processor |
|
print('All batches processed.') |
|
asyncio.run(main()) |
|
launch() |
|
|
|
# File: docmatix-main/zero_shot_exp/zero_shot.py |
|
from datasets import Dataset, Features, Value, load_dataset, Image, Sequence |
|
TEST_SUBSET_LEN = 200 |
|
TRAIN_SUBSET_LEN = 1700 |
|
FEATURES = Features({'images': Sequence(Image(decode=True)), 'texts': [{'user': Value('string'), 'assistant': Value('string'), 'source': Value('string')}]}) |
|
ds = load_dataset('HuggingFaceM4/Docmatix', 'images', streaming=True) |
|
test_subset = [] |
|
train_subset = [] |
|
for (idx, sample) in enumerate(ds['train']):
    if idx < TEST_SUBSET_LEN:
        test_subset.append(sample)
    elif idx < TEST_SUBSET_LEN + TRAIN_SUBSET_LEN:
        # The train subset starts right after the test subset, with no overlap.
        train_subset.append(sample)
    else:
        break
|
new_test_data = Dataset.from_list(test_subset, features=FEATURES) |
|
new_train_data = Dataset.from_list(train_subset, features=FEATURES) |
|
new_test_data.push_to_hub('HuggingFaceM4/Docmatix', 'zero-shot-exp', split='test') |
|
new_train_data.push_to_hub('HuggingFaceM4/Docmatix', 'zero-shot-exp', split='train') |
|
|
|
|