# File: docmatix-main/analysis/count_words_in_dataset.py
from collections import Counter
import string

def count_words(df, column_name):
    overall_counter = Counter()
    word_counts = []
    for text in df[column_name]:
        text = text.translate(str.maketrans(string.punctuation, ' ' * len(string.punctuation)))
        words = text.lower().split()
        word_count = len(words)
        word_counts.append(word_count)
        overall_counter.update(words)
    df['word_count'] = word_counts
    most_common_words = overall_counter.most_common(100)
    return (df, most_common_words)
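
# Minimal usage sketch (added for illustration, not part of the original script):
# the 'text' column and the toy DataFrame below are assumptions, used only to
# make the expected input and output of count_words explicit.
if __name__ == '__main__':
    import pandas as pd
    demo_df = pd.DataFrame({'text': ['Hello, world!', 'Hello again, more text here.']})
    demo_df, top_words = count_words(demo_df, 'text')
    print(demo_df['word_count'].tolist())  # [2, 5]
    print(top_words[:3])  # e.g. [('hello', 2), ('world', 1), ('again', 1)]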

# File: docmatix-main/analysis/plot.py
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
analysis_df = pd.read_json('prompt_analysis_results.json', orient='records', lines=True)
sns.set(style='whitegrid')
plt.figure(figsize=(16, 12))
# The six panels differ only in the column plotted, the labels, and the value
# format, so they are driven by a single specification list.
PANELS = [
    ('Number of Q/A pairs', 'Number of Q/A pairs per Prompt ID', 'Number of Q/A pairs', lambda v: f'{v / 1000000.0:.2f}e6'),
    ('Average answer length', 'Average Answer Length per Prompt ID', 'Average Answer Length', lambda v: f'{v:.2f}'),
    ('Diversity within documents', 'Diversity within Documents per Prompt ID', 'Diversity within Documents', lambda v: f'{v:.2f}'),
    ('Total empty questions', 'Total Empty Questions per Prompt ID', 'Total Empty Questions', lambda v: f'{v}'),
    ('Average Q/A pairs per page', 'Average Q/A pairs per Page per Prompt ID', 'Average Q/A pairs per Page', lambda v: f'{v:.2f}'),
    ('Number of unique questions', 'Number of unique questions per Prompt ID', 'Number of unique questions', lambda v: f'{v / 1000000.0:.2f}e6'),
]
for (panel_idx, (column, title, ylabel, fmt)) in enumerate(PANELS, start=1):
    plt.subplot(3, 2, panel_idx)
    sns.barplot(x='Prompt ID', y=column, data=analysis_df, palette='viridis')
    plt.title(title)
    plt.xlabel('Prompt ID')
    plt.ylabel(ylabel)
    for (i, row) in analysis_df.iterrows():
        plt.text(i, row[column], fmt(row[column]), ha='center', va='bottom')
plt.tight_layout()
plt.savefig('prompt_analysis_plots_enhanced.png')
plt.show()
report = f"\nPrompt Analysis Report\n=======================\nNumber of Q/A pairs per Prompt ID:\n{analysis_df[['Prompt ID', 'Number of Q/A pairs']]}\n\nAverage answer length per Prompt ID:\n{analysis_df[['Prompt ID', 'Average answer length']]}\n\nUnique questions per Prompt ID:\n{analysis_df[['Prompt ID', 'Number of unique questions']]}\n\nTotal pages per Prompt ID:\n{analysis_df[['Prompt ID', 'Total pages']]}\n\nAverage Q/A pairs per page per Prompt ID:\n{analysis_df[['Prompt ID', 'Average Q/A pairs per page']]}\n\nAverage answer length per page per Prompt ID:\n{analysis_df[['Prompt ID', 'Average answer length per page']]}\n\nDiversity within documents per Prompt ID:\n{analysis_df[['Prompt ID', 'Diversity within documents']]}\n\nTotal empty questions per Prompt ID:\n{analysis_df[['Prompt ID', 'Total empty questions']]}\n\n"
with open('prompt_analysis_report.txt', 'w') as f:
    f.write(report)
print('Report and plots generated successfully.')

# File: docmatix-main/clean_and_create/load_data.py
import os
import re
import io
from io import BytesIO
import pandas as pd
import datasets
from pdf2image import convert_from_bytes
from tqdm import tqdm
from concurrent.futures import ThreadPoolExecutor
import argparse
import fitz
import PIL.Image
tqdm.pandas(desc='Pandas apply progress')
fitz.TOOLS.mupdf_display_errors(False)
DATA_PATH = '/fsx/andi/pdfa_data/'
TAR_FILE_PATTERN = 'pdfa-eng-train-{:06d}.tar'

def resize_large_images(image, max_image_size=2940):
    (width, height) = image.size
    aspect_ratio = width / height
    resized = False
    if width >= height and width > max_image_size:
        width = max_image_size
        height = int(width / aspect_ratio)
        resized = True
    elif height > width and height > max_image_size:
        height = max_image_size
        width = int(height * aspect_ratio)
        resized = True
    if resized:
        image = image.resize((width, height), PIL.Image.LANCZOS)
    return image
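
# Illustrative check (added; not called anywhere in the pipeline): a 6000x3000
# landscape page should come back capped at the 2940px default while keeping
# its 2:1 aspect ratio.
def _demo_resize_large_images():
    demo_image = PIL.Image.new('L', (6000, 3000))
    return resize_large_images(demo_image).size  # (2940, 1470)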

def _decode_pdf_pages(sample):
    try:
        image_fmt = 'L'
        with io.BytesIO(sample) as b:
            doc = fitz.Document(stream=b)
            num_image_pages = doc.page_count
            decoded_image_pages = []
            for page_index in range(num_image_pages):
                page = doc.load_page(page_index)
                pixmap = page.get_pixmap(dpi=150)
                page_image = PIL.Image.frombuffer('RGB', (pixmap.width, pixmap.height), pixmap.samples)
                page_image = resize_large_images(page_image.convert(image_fmt))
                decoded_image_pages += [page_image]
            return decoded_image_pages
    except Exception as e:
        print(f'Error decoding pdf pages: {e}')
        return None

def convert_img_to_png_bytes(img):
    with BytesIO() as buffer:
        img.save(buffer, format='PNG')
        return buffer.getvalue()

def process_images(pdf_bytes):
    images = convert_from_bytes(pdf_bytes, dpi=150)
    return [convert_img_to_png_bytes(resize_large_images(img)) for img in images]

def is_valid_question_or_answer(text):
    if not text or text.strip() == '':
        return False
    patterns = ['\\{.*?\\}', '\\[.*?\\]', '<.*?>', '\\b\\d{1,3}(\\.\\d{1,3}){3}\\b', '\\w+\\.\\w+', '\\n\\s*\\n', 'unanswerable', 'Q\\d+: ', 'A\\d+: ']
    return not any((re.search(pattern, text, re.IGNORECASE) for pattern in patterns))
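
# Added illustration (not called anywhere): plain questions pass the filter,
# while leftover 'Q1:' numbering, dotted tokens such as '1.2', and empty
# strings are rejected by the patterns above.
def _demo_is_valid_question_or_answer():
    return [is_valid_question_or_answer('What is the main topic of the document?'),  # True
            is_valid_question_or_answer('Q1: What is covered?'),  # False: leading question numbering
            is_valid_question_or_answer('See section 1.2 for details'),  # False: matches \w+\.\w+
            is_valid_question_or_answer('')]  # False: empty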

def process_group(key_group):
    # Unpack outside the try block so the key is always available for the error message.
    (key, group) = key_group
    try:
        qa_pairs = []
        for (_, row) in group.iterrows():
            question = re.sub('^Q\\d+: ', '', row['question'])
            answer = re.sub('^A\\d+: ', '', row['answer'])
            if is_valid_question_or_answer(question) and is_valid_question_or_answer(answer):
                qa_pairs.append({'user': question, 'assistant': answer, 'source': 'PDFA key: ' + str(row['__key__'])})
        if qa_pairs:
            return {'texts': qa_pairs, 'images': group['pdf'].iloc[0]}
        return None
    except Exception as e:
        print(f'Error processing group {key}: {e}')
        return None

def process_tar_index(tar_index, step_size, question_answer_df):
    shard_nr = tar_index // step_size
    loaded_datasets = []
    for inner_idx in range(step_size):
        tar_file = os.path.join(DATA_PATH, TAR_FILE_PATTERN.format(tar_index + inner_idx))
        try:
            print(f'Loading dataset from: {tar_file}')
            hf_dataset = datasets.load_dataset('webdataset', split='train', data_files=tar_file, cache_dir='/fsx/.cache').to_pandas()
            hf_dataset.__key__ = hf_dataset.__key__.apply(pd.to_numeric)
            loaded_datasets.append(hf_dataset)
        except Exception as e:
            print(f'Error loading dataset from: {tar_file}')
            print(e)
    hf_dataset = pd.concat(loaded_datasets, ignore_index=True)
    print(f'Concatenated datasets with {len(hf_dataset)} samples')
    hf_dataset = hf_dataset[hf_dataset['__key__'].isin(question_answer_df['__key__'].unique())]
    df_data = pd.DataFrame({'key': []})
    if os.path.exists(f'/fsx/m4/datasets/large_docvqa/shard_{shard_nr}'):
        print('using saved data')
        df_data = datasets.load_from_disk(f'/fsx/m4/datasets/large_docvqa/shard_{shard_nr}').to_pandas()
        df_data['__key__'] = df_data.texts.apply(lambda x: x[0]['source'].split('_')[1])
        df_data['__key__'] = df_data['__key__'].apply(pd.to_numeric)
        df_data.drop(columns=['texts'], inplace=True)
        hf_dataset = hf_dataset[hf_dataset['__key__'].isin(df_data['__key__'].unique())]
        hf_dataset = pd.merge(hf_dataset, df_data, on='__key__', how='inner')
        hf_dataset['pdf'] = hf_dataset['images']
        hf_dataset.drop(columns=['images'], inplace=True)
        del df_data
    else:
        hf_dataset['pdf'] = hf_dataset['pdf'].progress_apply(lambda x: process_images(x))
        hf_dataset = hf_dataset[~hf_dataset['pdf'].isnull()]
    merged_df = pd.merge(hf_dataset, question_answer_df, on='__key__', how='inner')
    data_extracted = []
    max_threads = 10
    with ThreadPoolExecutor(max_threads) as executor:
        results = list(tqdm(executor.map(process_group, merged_df.groupby('__key__')), desc='Extracting data', total=len(merged_df['__key__'].unique())))
    data_extracted.extend(results)
    data_extracted = list(filter(lambda item: item is not None, data_extracted))
    FEATURES = datasets.Features({'images': datasets.Sequence(datasets.Image(decode=True)), 'texts': [{'user': datasets.Value('string'), 'assistant': datasets.Value('string'), 'source': datasets.Value('string')}]})

    def data_generator():
        for data_dict in data_extracted:
            yield data_dict
    ds_shard = datasets.Dataset.from_generator(data_generator, features=FEATURES, writer_batch_size=100, cache_dir='/fsx/.cache')
    ds_shard.save_to_disk(f'/fsx/m4/datasets/docvqa_instruct/shard_{shard_nr}')

def load_and_concatenate_dataframes():
    if os.path.exists('concatenated_synthetic_dataset.parquet.gzip'):
        return pd.read_parquet('concatenated_synthetic_dataset.parquet.gzip')
    directory = '.'
    all_files = os.listdir(directory)
    h5_files = sorted([f for f in all_files if re.match('synthetic_dataset_batch_\\d+\\.h5$', f)])
    dataframes = []
    for file in tqdm(h5_files, desc='Loading data'):
        file_path = os.path.join(directory, file)
        df = pd.read_hdf(file_path)
        if '__key__' not in df.columns:
            raise ValueError(f'Key column not found in {file_path}')
        df.__key__ = df.__key__.apply(pd.to_numeric)
        dataframes.append(df)
    concatenated_df = pd.concat(dataframes, ignore_index=True)
    concatenated_df.to_parquet('concatenated_synthetic_dataset.parquet.gzip', compression='gzip')
    return concatenated_df
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Process .h5 files and tar indices.')
    parser.add_argument('--start_index', type=int, default=0, help='The starting index for tar processing.')
    parser.add_argument('--step_size', type=int, default=1, help='The step size for tar processing.')
    args = parser.parse_args()
    question_answer_df = load_and_concatenate_dataframes()
    print(len(question_answer_df))
    process_tar_index(args.start_index, args.step_size, question_answer_df=question_answer_df)

# File: docmatix-main/create_only_with_pdfs/load_data.py
import os
import re
import pandas as pd
import datasets
from tqdm import tqdm
from concurrent.futures import ThreadPoolExecutor
import argparse
tqdm.pandas(desc='Pandas apply progress')
DATA_PATH = '/fsx/andi/pdfa_data/'
TAR_FILE_PATTERN = 'pdfa-eng-train-{:06d}.tar'

def is_valid_question_or_answer(text):
    if not text or text.strip() == '':
        return False
    patterns = ['\\{.*?\\}', '\\[.*?\\]', '<.*?>', '\\b\\d{1,3}(\\.\\d{1,3}){3}\\b', '\\w+\\.\\w+', '\\n\\s*\\n', 'unanswerable', 'Q\\d+: ', 'A\\d+: ']
    return not any((re.search(pattern, text, re.IGNORECASE) for pattern in patterns))

def process_group(key_group):
    # Unpack outside the try block so the key is always available for the error message.
    (key, group) = key_group
    try:
        qa_pairs = []
        for (_, row) in group.iterrows():
            question = re.sub('^Q\\d+: ', '', row['question'])
            answer = re.sub('^A\\d+: ', '', row['answer'])
            if is_valid_question_or_answer(question) and is_valid_question_or_answer(answer):
                qa_pairs.append({'user': question, 'assistant': answer, 'source': 'PDFA key: ' + str(row['__key__'])})
        if qa_pairs:
            return {'texts': qa_pairs, 'pdf': group['pdf'].iloc[0]}
        return None
    except Exception as e:
        print(f'Error processing group {key}: {e}')
        return None

def process_tar_index(tar_index, step_size, question_answer_df):
    shard_nr = tar_index // step_size
    loaded_datasets = []
    for inner_idx in range(step_size):
        tar_file = os.path.join(DATA_PATH, TAR_FILE_PATTERN.format(tar_index + inner_idx))
        try:
            print(f'Loading dataset from: {tar_file}')
            hf_dataset = datasets.load_dataset('webdataset', split='train', data_files=tar_file, cache_dir='/fsx/.cache').to_pandas()
            hf_dataset.__key__ = hf_dataset.__key__.apply(pd.to_numeric)
            loaded_datasets.append(hf_dataset)
        except Exception as e:
            print(f'Error loading dataset from: {tar_file}')
            print(e)
    hf_dataset = pd.concat(loaded_datasets, ignore_index=True)
    print(f'Concatenated datasets with {len(hf_dataset)} samples')
    hf_dataset = hf_dataset[hf_dataset['__key__'].isin(question_answer_df['__key__'].unique())]
    merged_df = pd.merge(hf_dataset, question_answer_df, on='__key__', how='inner')
    data_extracted = []
    max_threads = 10
    with ThreadPoolExecutor(max_threads) as executor:
        results = list(tqdm(executor.map(process_group, merged_df.groupby('__key__')), desc='Extracting data', total=len(merged_df['__key__'].unique())))
    data_extracted.extend(results)
    data_extracted = list(filter(lambda item: item is not None, data_extracted))
    FEATURES = datasets.Features({'pdf': datasets.Value('binary'), 'texts': [{'user': datasets.Value('string'), 'assistant': datasets.Value('string'), 'source': datasets.Value('string')}]})

    def data_generator():
        for data_dict in data_extracted:
            yield data_dict
    ds_shard = datasets.Dataset.from_generator(data_generator, features=FEATURES, writer_batch_size=100, cache_dir='/fsx/.cache')
    ds_shard.save_to_disk(f'/fsx/m4/datasets/docmatix_pdf/shard_{shard_nr}')

def load_and_concatenate_dataframes():
    if os.path.exists('/fsx/andi/llm-swarm/concatenated_synthetic_dataset.parquet.gzip'):
        return pd.read_parquet('/fsx/andi/llm-swarm/concatenated_synthetic_dataset.parquet.gzip')
    directory = '.'
    all_files = os.listdir(directory)
    h5_files = sorted([f for f in all_files if re.match('synthetic_dataset_batch_\\d+\\.h5$', f)])
    dataframes = []
    for file in tqdm(h5_files, desc='Loading data'):
        file_path = os.path.join(directory, file)
        df = pd.read_hdf(file_path)
        if '__key__' not in df.columns:
            raise ValueError(f'Key column not found in {file_path}')
        df.__key__ = df.__key__.apply(pd.to_numeric)
        dataframes.append(df)
    concatenated_df = pd.concat(dataframes, ignore_index=True)
    concatenated_df.to_parquet('concatenated_synthetic_dataset.parquet.gzip', compression='gzip')
    return concatenated_df
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Process .h5 files and tar indices.')
    parser.add_argument('--start_index', type=int, default=0, help='The starting index for tar processing.')
    parser.add_argument('--step_size', type=int, default=1, help='The step size for tar processing.')
    args = parser.parse_args()
    question_answer_df = load_and_concatenate_dataframes()
    print(len(question_answer_df))
    process_tar_index(args.start_index, args.step_size, question_answer_df=question_answer_df)

# File: docmatix-main/create_only_with_pdfs/upload_data.py
from datasets import load_from_disk, concatenate_datasets
from tqdm import tqdm
import os

def get_datasets():
    if os.path.isdir('/fsx/m4/datasets/docmatix_pdf/concatenated'):
        return load_from_disk('/fsx/m4/datasets/docmatix_pdf/concatenated')
    hf_datasets = []
    for shard_nr in tqdm(range(200)):
        try:
            hf_datasets.append(load_from_disk(f'/fsx/m4/datasets/docmatix_pdf/shard_{shard_nr}'))
        except Exception as e:
            print(f'Error loading dataset from: {shard_nr}')
            print(e)
    hf_data = concatenate_datasets(hf_datasets)
    hf_data.save_to_disk('/fsx/m4/datasets/docmatix_pdf/concatenated')
    return hf_data
data = get_datasets()
print(data.features)
print(data[0]['texts'])
print(data[0]['pdf'][:10])
print(len(data))
data.push_to_hub('HuggingFaceM4/Docmatix', 'pdf')

# File: docmatix-main/florence_2_dataset/create_florence_2_dataset.py
from functools import partial
from datasets import load_from_disk, concatenate_datasets
from tqdm import tqdm
import re
import pandas as pd
import os
import datasets
IMAGE_FEATURES = datasets.Features({'image': datasets.Image(decode=True), '__key__': datasets.Value('int64')})
TEXT_FEATURES = datasets.Features({'question': datasets.Value('string'), 'answer': datasets.Value('string'), '__key__': datasets.Value('int64')})

def text_generator(df_text):
    for (i, row) in df_text.iterrows():
        print(i, row['__key__'])
        yield {'question': row['question'], 'answer': row['answer'], '__key__': row['__key__']}

def img_generator(df_img):
    for (i, row) in df_img.iterrows():
        print(i, row['__key__'])
        yield {'image': row['images'][0], '__key__': row['__key__']}
pre_key_len = len('PDFA key: ')
for shard_number in tqdm(range(0, 200)):
    try:
        if os.path.exists(f'/fsx/m4/datasets/florence_vqa_instruct/shard_{shard_number}') and os.path.exists(f'/fsx/m4/datasets/florence_vqa_instruct_images/shard_{shard_number}'):
            continue
        df_data = load_from_disk(f'/fsx/m4/datasets/docvqa_instruct/shard_{shard_number}').to_pandas()
        df_data['__key__'] = df_data.texts.apply(lambda x: x[0]['source'][pre_key_len:])
        df_data['__key__'] = df_data['__key__'].apply(pd.to_numeric)
        df_images = df_data[['images', '__key__']].copy()
        df_images = df_images[df_images['images'].apply(len) <= 1]
        df_texts = df_data[['texts']].explode('texts')
        df_texts['question'] = df_texts['texts'].apply(lambda x: x.get('user'))
        df_texts['answer'] = df_texts['texts'].apply(lambda x: x.get('assistant'))
        df_texts['__key__'] = df_texts['texts'].apply(lambda x: x.get('source')[pre_key_len:])
        df_texts['__key__'] = df_texts['__key__'].apply(pd.to_numeric)
        df_texts = df_texts[df_texts['__key__'].isin(df_images['__key__'].unique())]
        df_texts.drop(columns=['texts'], inplace=True)
        df_texts = df_texts[df_texts['question'].apply(lambda x: len(x.split()) <= 900)]
        df_texts = df_texts[df_texts['answer'].apply(lambda x: len(x.split()) <= 900)]
        df_images = df_images[df_images['__key__'].isin(df_texts['__key__'].unique())]
        ds_text = datasets.Dataset.from_generator(partial(text_generator, df_texts), features=TEXT_FEATURES, writer_batch_size=100, cache_dir='/fsx/.cache')
        ds_text.save_to_disk(f'/fsx/m4/datasets/florence_vqa_instruct/shard_{shard_number}')
        df_image = datasets.Dataset.from_generator(partial(img_generator, df_images), features=IMAGE_FEATURES, writer_batch_size=100, cache_dir='/fsx/.cache')
        df_image.save_to_disk(f'/fsx/m4/datasets/florence_vqa_instruct_images/shard_{shard_number}')
        print(f'Finished processing shard: {shard_number}')
    except Exception as e:
        print(f'shard {shard_number} failed: {e}')
all_ds = []
for shard in tqdm(range(0, 200)):
    try:
        data = load_from_disk(f'/fsx/m4/datasets/florence_vqa_instruct/shard_{shard}')
        all_ds.append(data)
    except Exception as e:
        print(f'shard {shard} failed: {e}')
all_ds = concatenate_datasets(all_ds)
all_ds.save_to_disk('/fsx/m4/datasets/complete_florence_vqa_instruct', num_proc=96)

# File: docmatix-main/generation/base_prompts.py
BASE_PROMPT = '\nYou are reading text extracted from a PDF with several pages. The pages are divided by a line saying \'NEW PAGE\'. \nYour role is to {role_description}. If the type of questions requested is impossible to generate due to the simplicity of the document, default to simpler factual questions.\nThe PDFs might contain tables or images that are poorly parsed in the text. Avoid asking questions about these.\nIf the text seems to only contain uninteresting information, output "unanswerable" as the answer.\nHere are some examples of questions that follow your role:\n{examples}\n'
BASE_USER_CONTENT = 'The text contained in the PDF is: \n{text} \n\nCreate the question-answer pairs following this format:\nQ#: \nA#:\n\nIf you can\'t generate questions for the text, write "unanswerable" as the answer.\n'
PROMPTS = [{'role_description': 'understand the content of the PDF and create as many pairs of questions and answers as you need to cover the content of the PDF comprehensively. The questions should be varied, covering factual information, inferences, and deeper analysis of the text.', 'examples': '\n        Q1: What is the main topic of the document?\n        A1: The main topic of the document is...\n        \n        Q2: What are the key points discussed in the first section?\n        A2: The key points discussed in the first section include...\n\n        Q3: How does the author support their argument about X?\n        A3: The author supports their argument about X by...\n\n        Q4: What can be inferred about Y from the document?\n        A4: From the document, it can be inferred that Y...\n\n        Q5: What are the implications of Z mentioned in the document?\n        A5: The implications of Z mentioned in the document are...\n        '}, {'role_description': 'focus on generating enough pairs of questions and answers for each section of the document to ensure a detailed and complete coverage of the document.', 'examples': '\n        Q1: What is the primary focus of the first section?\n        A1: The primary focus of the first section is...\n\n        Q2: What are the significant details mentioned in the second section?\n        A2: The significant details mentioned in the second section include...\n\n        Q3: How does the information in the third section relate to the overall topic of the document?\n        A3: The information in the third section relates to the overall topic by...\n        '}, {'role_description': 'understand the content of the PDF and create as many pairs of questions and answers as you need to cover the content of the PDF comprehensively. The questions should require critical thinking and analysis.', 'examples': '\n        Q1: What arguments does the author present in support of their thesis?\n        A1: The arguments presented by the author in support of their thesis include...\n\n        Q2: How does the author compare X and Y in the text?\n        A2: The author compares X and Y by...\n\n        Q3: What are the potential implications of the findings discussed in the document?\n        A3: The potential implications of the findings are...\n        '}, {'role_description': 'create as many pairs of questions and answers as you need to cover both summaries of sections and specific details. Ensure coverage of broad themes and granular information.', 'examples': '\n        Q1: What is the summary of the first section?\n        A1: The summary of the first section is...\n\n        Q2: What specific data or evidence is provided in the second section?\n        A2: The specific data or evidence provided in the second section includes...\n\n        Q3: How do the details in the third section support the main argument of the document?\n        A3: The details in the third section support the main argument by...\n        '}, {'role_description': 'understand the content of the PDF and create as many pairs of questions and answers as you need to cover the content of the PDF comprehensively. The questions should be varied, covering factual information, inferences, and deeper analysis of the text. The questions should be asked in a general manner without introducing details from the document itself.', 'examples': '\n        Q1: What is the summary of the first section?\n        A1: The first section, called xxx, can be summarized as...\n\n        Q2: What specific data or evidence is provided in the second section?\n        A2: In the section called xxx, there is much data and evidence presented, such as...\n\n        Q3: How do the details in the third section support the main argument of the document?\n        A3: The details in the section on "xxx" support the main argument by...\n        '}]

def create_prompts(text):
    prompts = []
    for prompt in PROMPTS:
        system_content = BASE_PROMPT.format(role_description=prompt['role_description'], examples=prompt['examples'])
        prompts.append([{'role': 'system', 'content': system_content}, {'role': 'user', 'content': BASE_USER_CONTENT.format(text=text)}])
    return prompts
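
# Small usage sketch (added for illustration): create_prompts builds one
# chat-message list per entry in PROMPTS, each with a system turn holding the
# role description and examples and a user turn holding the document text.
if __name__ == '__main__':
    demo_prompts = create_prompts('First page text.\nNEW PAGE\nSecond page text.')
    print(len(demo_prompts))  # one prompt per template in PROMPTS, i.e. 5
    print([message['role'] for message in demo_prompts[0]])  # ['system', 'user']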

# File: docmatix-main/generation/llm_swarm_script.py
import asyncio
import json
import os
import random
import re
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Dict, List, Optional
import pandas as pd
from datasets import load_dataset
from huggingface_hub import AsyncInferenceClient
from tqdm import trange
from tqdm.asyncio import tqdm_asyncio
from transformers import AutoTokenizer
# base_prompts.py lives alongside this script in generation/.
from base_prompts import BASE_PROMPT, BASE_USER_CONTENT, PROMPTS
from llm_swarm import LLMSwarm, LLMSwarmConfig
CHECKPOINT_FILE = 'checkpoint.json'
DATA_PATH = '/fsx/andi/pdfa_data/'
TAR_FILE_PATTERN = 'pdfa-eng-train-{:06d}.tar'
NUM_TAR_FILES = 1800
MAX_PAGES_PER_PDF = 4
STEP_SIZE = 10
model_id = 'microsoft/Phi-3-small-8k-instruct'

def create_llm_prompt(prompt, text):
    system_content = BASE_PROMPT.format(role_description=prompt['role_description'], examples=prompt['examples'])
    return [{'role': 'system', 'content': system_content}, {'role': 'user', 'content': BASE_USER_CONTENT.format(text=text)}]

def extract_text_per_page_from_sample(sample: Dict[str, Any]) -> List[str]:
    texts = []
    for page in sample['json']['pages']:
        pages_text = ' \n '.join(page['lines']['text'])
        texts.append(pages_text)
    return texts
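
# Added illustration (not called anywhere): the nested structure below mirrors
# the 'json' field of the PDFA webdataset samples that this helper expects;
# the literal strings are made up.
def _demo_extract_text_per_page_from_sample():
    sample = {'json': {'pages': [{'lines': {'text': ['Title', 'First paragraph.']}},
                                 {'lines': {'text': ['Second page.']}}]}}
    return extract_text_per_page_from_sample(sample)
    # -> ['Title \n First paragraph.', 'Second page.']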

def extract_chunks(pages: List[Any], max_tokens_per_group: int, max_pages_per_group: int, n_overlap: int) -> List[str]:
    chunks = []
    current_chunk = []
    current_chunk_tokens = 0
    current_chunk_pages = 0
    page_token_counts = [len(tokenizer.encode(page, add_special_tokens=False)) for page in pages]
    for (i, page) in enumerate(pages):
        page_tokens = page_token_counts[i]
        if page_tokens > max_tokens_per_group:
            print(f'Skipping document where page nr {i} has {page_tokens} tokens.')
            return []
        if current_chunk_tokens + page_tokens > max_tokens_per_group or current_chunk_pages + 1 > max_pages_per_group:
            if current_chunk:
                chunks.append('\nNEW PAGE\n'.join(current_chunk))
            current_chunk = current_chunk[-n_overlap:] if n_overlap > 0 else []
            current_chunk_tokens = sum(page_token_counts[max(0, i - n_overlap):i])
            current_chunk_pages = len(current_chunk)
        current_chunk.append(page)
        current_chunk_tokens += page_tokens
        current_chunk_pages += 1
    if current_chunk:
        chunks.append('\nNEW PAGE\n'.join(current_chunk))
    return chunks
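
# Added illustration (not called anywhere; it relies on the module-level
# tokenizer defined further down): three short pages with max_pages_per_group=2
# and a one-page overlap yield two chunks joined by the 'NEW PAGE' marker,
# with the second page repeated at the start of the second chunk.
def _demo_extract_chunks():
    pages = ['page one text', 'page two text', 'page three text']
    return extract_chunks(pages, max_tokens_per_group=5000, max_pages_per_group=2, n_overlap=1)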

def create_tasks(dataset: pd.DataFrame, prompt_id: Optional[int]=None, n_overlap: int=2) -> List[Dict[str, Any]]:
    if prompt_id is not None:
        selected_id_prompt = prompt_id
    tasks = []
    for (index, sample) in dataset.iterrows():
        text_per_page = extract_text_per_page_from_sample(sample)
        if len(text_per_page) > MAX_PAGES_PER_PDF:
            continue
        page_chunks = extract_chunks(text_per_page, max_tokens_per_group=5000, max_pages_per_group=5, n_overlap=n_overlap)
        for chunk in page_chunks:
            if prompt_id is None:
                selected_id_prompt = random.randint(0, 4)
            prompt = PROMPTS[selected_id_prompt]
            messages = create_llm_prompt(prompt, chunk)
            prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
            tasks_dict = {'__key__': sample['__key__'], 'Page count': len(text_per_page), 'messages': prompt, 'Prompt ID': selected_id_prompt}
            tasks.append(tasks_dict)
    return tasks

def extract_qa_pairs(text):
    qa_pattern = re.compile('(Q\\d+:\\s*.*?)(A\\d+:\\s*.*?)(?=(Q\\d+:)|$)', re.DOTALL)
    matches = qa_pattern.findall(text)
    qa_pairs = [(q.strip(), a.strip()) for match in matches for (q, a) in [match[:2]]]
    return qa_pairs
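
# Added illustration (not called anywhere): the completion format the regex
# above is built for, and the (question, answer) tuples it yields.
def _demo_extract_qa_pairs():
    completion = 'Q1: What is covered?\nA1: The budget.\nQ2: Who wrote it?\nA2: The council.'
    return extract_qa_pairs(completion)
    # -> [('Q1: What is covered?', 'A1: The budget.'),
    #     ('Q2: Who wrote it?', 'A2: The council.')]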

def process_outputs_to_df(df):
    all_data = []
    for (index, row) in df.iterrows():
        task = row['Task']
        completion = row['Completion']
        sample_key = task['__key__']
        page_count = task['Page count']
        prompt_id = task['Prompt ID']
        qa_pairs = extract_qa_pairs(completion)
        if len(qa_pairs) == 0:
            print('No Q&A pairs found for sample:', sample_key)
        for (question, answer) in qa_pairs:
            all_data.append({'__key__': sample_key, 'Page count': page_count, 'Prompt ID': prompt_id, 'question': question, 'answer': answer})
    qa_df = pd.DataFrame(all_data)
    return qa_df

def save_checkpoint(tar_index, total_examples):
    checkpoint_data = {'tar_index': tar_index, 'total_examples': total_examples}
    with open(CHECKPOINT_FILE, 'w') as f:
        json.dump(checkpoint_data, f)

def load_checkpoint():
    if os.path.exists(CHECKPOINT_FILE):
        with open(CHECKPOINT_FILE, 'r') as f:
            return json.load(f)
    return {'tar_index': 0, 'total_examples': 0}
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)

def launch():
    with LLMSwarm(LLMSwarmConfig(instances=8, inference_engine='vllm', gpus=1, model=model_id, slurm_template_path='templates/vllm_h100.template.slurm', load_balancer_template_path='templates/nginx.template.conf', trust_remote_code=True, per_instance_max_parallel_requests=200)) as llm_swarm:
        semaphore = asyncio.Semaphore(llm_swarm.suggested_max_parallel_requests)
        client = AsyncInferenceClient(model=llm_swarm.endpoint)

        async def process_text(prompt):
            async with semaphore:
                response = await client.post(json={'prompt': prompt, 'max_tokens': 2000})
                res = json.loads(response.decode('utf-8'))['text'][0][len(prompt):]
                return res

        def load_and_process_dataset(tar_file):
            try:
                print(f'Loading dataset from: {tar_file}')
                dataset = load_dataset('webdataset', split='train', data_files=tar_file).to_pandas()
                tasks = create_tasks(dataset, prompt_id=None, n_overlap=1)
                return tasks
            except Exception as e:
                print(f'Error loading dataset from: {tar_file}')
                print(e)
                return []

        def get_future_tasks(tar_index, executor):
            futures = []
            for inner_idx in range(STEP_SIZE):
                tar_file = os.path.join(DATA_PATH, TAR_FILE_PATTERN.format(tar_index + inner_idx))
                futures.append(executor.submit(load_and_process_dataset, tar_file))
            return futures

        async def process_dataset(tar_index, total_examples):
            next_future_tasks = get_future_tasks(tar_index, ThreadPoolExecutor(max_workers=STEP_SIZE))
            for idx in trange(tar_index, NUM_TAR_FILES + STEP_SIZE, STEP_SIZE, desc='Creating Dataset'):
                print(f'Processing tar file {idx}')
                tasks = []
                future_tasks = next_future_tasks
                results = [f.result() for f in future_tasks]
                for result in results:
                    tasks.extend(result)
                next_future_tasks = get_future_tasks(idx + STEP_SIZE, ThreadPoolExecutor(max_workers=1))
                results = await tqdm_asyncio.gather(*(process_text(task['messages']) for task in tasks))
                df = pd.DataFrame({'Task': tasks, 'Completion': results})
                df_new = process_outputs_to_df(df)
                df_new.to_hdf(f'synthetic_dataset_batch_{idx}.h5', key='df', mode='w')
                unique_keys = df_new['__key__'].nunique()
                total_examples += unique_keys
                save_checkpoint(idx, total_examples)

        async def main():
            checkpoint = load_checkpoint()
            tar_index = checkpoint['tar_index']
            if tar_index != 0:
                tar_index += STEP_SIZE
                print(f'Resuming from tar file {tar_index}')
            total_examples = checkpoint['total_examples']
            processor = asyncio.create_task(process_dataset(tar_index, total_examples))
            await processor
            print('All batches processed.')
        asyncio.run(main())
launch()

# File: docmatix-main/zero_shot_exp/zero_shot.py
from datasets import Dataset, Features, Value, load_dataset, Image, Sequence
TEST_SUBSET_LEN = 200
TRAIN_SUBSET_LEN = 1700
FEATURES = Features({'images': Sequence(Image(decode=True)), 'texts': [{'user': Value('string'), 'assistant': Value('string'), 'source': Value('string')}]})
ds = load_dataset('HuggingFaceM4/Docmatix', 'images', streaming=True)
test_subset = []
train_subset = []
for (idx, sample) in enumerate(ds['train']):
    if idx < TEST_SUBSET_LEN:
        test_subset.append(sample)
    else:
        # Keep the test and train subsets disjoint and stop once the train subset is full.
        if idx >= TEST_SUBSET_LEN + TRAIN_SUBSET_LEN:
            break
        train_subset.append(sample)
new_test_data = Dataset.from_list(test_subset, features=FEATURES)
new_train_data = Dataset.from_list(train_subset, features=FEATURES)
new_test_data.push_to_hub('HuggingFaceM4/Docmatix', 'zero-shot-exp', split='test')
new_train_data.push_to_hub('HuggingFaceM4/Docmatix', 'zero-shot-exp', split='train')