import json
import pandas as pd


# Define a generator function to read in the data in smaller chunks
# Define a generator function to read in the data in smaller chunks
def read_json(file_path):
    """Lazily yield one parsed JSON object per non-blank line of *file_path*.

    The file is expected to contain newline-delimited JSON (one document
    per line). Streaming line by line keeps memory flat regardless of
    file size.

    Raises:
        json.JSONDecodeError: if a non-blank line is not valid JSON.
        OSError: if the file cannot be opened.
    """
    with open(file_path, encoding='utf-8') as f:
        # Iterating the file object reads line-by-line via the buffered
        # layer; no manual readline/EOF sentinel loop needed.
        for line in f:
            # Skip blank/whitespace-only lines instead of crashing on them.
            if line.strip():
                yield json.loads(line)


# Columns copied verbatim from each comment/reply snippet into a row.
_SNIPPET_FIELDS = (
    'textDisplay',
    'textOriginal',
    'authorDisplayName',
    'publishedAt',
    'updatedAt',
)


def _row(snippet, number):
    """Build one flat output row from a snippet dict.

    *number* is the 1-based index of a top-level comment, or '' for a
    reply so replies appear unnumbered beneath their parent in the sheet.
    """
    row = {'Number': number}
    for field in _SNIPPET_FIELDS:
        row[field] = snippet[field]
    return row


results = []

# Iterate over the generator instead of loading the entire file, so only
# one comment thread is in memory at a time. enumerate supplies the
# running 1-based row number (replaces a manual counter).
for count, chunk in enumerate(read_json('data.txt'), start=1):
    top_snippet = chunk['snippet']['topLevelComment']['snippet']
    results.append(_row(top_snippet, count))

    # Replies are optional; when present they follow their parent comment.
    if 'replies' in chunk:
        for reply in chunk['replies']['comments']:
            results.append(_row(reply['snippet'], ''))

df = pd.DataFrame(results)
df.to_excel('output.xlsx', index=False)
