import os
import zipfile
import re
import pandas as pd
import gradio as gr
import gzip
import pickle
import numpy as np
from bertopic import BERTopic
from datetime import datetime

from typing import List, Tuple

today = datetime.now().strftime("%d%m%Y")
today_rev = datetime.now().strftime("%Y%m%d")

def get_or_create_env_var(var_name:str, default_value:str) -> str:
    # Get the environment variable if it exists
    value = os.environ.get(var_name)
    
    # If it doesn't exist, set it to the default value
    if value is None:
        os.environ[var_name] = default_value
        value = default_value
    
    return value
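
# Illustrative usage (kept commented so the module stays import-safe;
# 'MAX_QUEUE_SIZE' is a hypothetical variable name):
#   max_queue = get_or_create_env_var('MAX_QUEUE_SIZE', '10')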

# Retrieving or setting output folder
env_var_name = 'GRADIO_OUTPUT_FOLDER'
default_value = 'output/'

output_folder = get_or_create_env_var(env_var_name, default_value)
print(f'The value of {env_var_name} is {output_folder}')

def ensure_output_folder_exists():
    """Checks if the 'output/' folder exists and creates it if not."""

    folder_name = "output/"

    if not os.path.exists(folder_name):
        # Create the folder if it doesn't exist
        os.makedirs(folder_name)
        print(f"Created the {folder_name} folder.")
    else:
        print(f"The {folder_name} folder already exists.")

async def get_connection_params(request: gr.Request):
    base_folder = ""

    if request:
        #print("request user:", request.username)

        #request_data = await request.json()  # Parse JSON body
        #print("All request data:", request_data)
        #context_value = request_data.get('context') 
        #if 'context' in request_data:
        #     print("Request context dictionary:", request_data['context'])

        # print("Request headers dictionary:", request.headers)
        # print("All host elements", request.client)           
        # print("IP address:", request.client.host)
        # print("Query parameters:", dict(request.query_params))
        # To get the underlying FastAPI items you would need to use await and some fancy @ stuff for a live query: https://fastapi.tiangolo.com/vi/reference/request/
        #print("Request dictionary to object:", request.request.body())
        print("Session hash:", request.session_hash)

        # Retrieving or setting CUSTOM_CLOUDFRONT_HEADER
        CUSTOM_CLOUDFRONT_HEADER_var = get_or_create_env_var('CUSTOM_CLOUDFRONT_HEADER', '')
        #print(f'The value of CUSTOM_CLOUDFRONT_HEADER is {CUSTOM_CLOUDFRONT_HEADER_var}')

        # Retrieving or setting CUSTOM_CLOUDFRONT_HEADER_VALUE
        CUSTOM_CLOUDFRONT_HEADER_VALUE_var = get_or_create_env_var('CUSTOM_CLOUDFRONT_HEADER_VALUE', '')
        #print(f'The value of CUSTOM_CLOUDFRONT_HEADER_VALUE_var is {CUSTOM_CLOUDFRONT_HEADER_VALUE_var}')

        if CUSTOM_CLOUDFRONT_HEADER_var and CUSTOM_CLOUDFRONT_HEADER_VALUE_var:
            if CUSTOM_CLOUDFRONT_HEADER_var in request.headers:
                supplied_cloudfront_custom_value = request.headers[CUSTOM_CLOUDFRONT_HEADER_var]
                if supplied_cloudfront_custom_value == CUSTOM_CLOUDFRONT_HEADER_VALUE_var:
                    print("Custom Cloudfront header found:", supplied_cloudfront_custom_value)
                else:
                    raise ValueError("Custom Cloudfront header value does not match expected value.")

        # Get output save folder from 1 - username passed in from direct Cognito login, 2 - Cognito ID header passed through a Lambda authenticator, 3 - the session hash.

        if request.username:
            out_session_hash = request.username
            base_folder = "user-files/"

        elif 'x-cognito-id' in request.headers:
            out_session_hash = request.headers['x-cognito-id']
            base_folder = "user-files/"
            print("Cognito ID found:", out_session_hash)

        else:
            out_session_hash = request.session_hash
            base_folder = "temp-files/"
            # print("Cognito ID not found. Using session hash as save folder:", out_session_hash)

        output_folder = base_folder + out_session_hash + "/"
        #if bucket_name:
        #    print("S3 output folder is: " + "s3://" + bucket_name + "/" + output_folder)

        return out_session_hash, output_folder
    else:
        print("No session parameters found.")
        return "",""

def detect_file_type(filename):
    """Detect the file type based on its extension."""
    if filename.endswith('.csv') or filename.endswith('.csv.gz') or filename.endswith('.zip'):
        return 'csv'
    elif filename.endswith('.xlsx'):
        return 'xlsx'
    elif filename.endswith('.parquet'):
        return 'parquet'
    elif filename.endswith('.pkl.gz'):
        return 'pkl.gz'
    elif filename.endswith('.pkl'):
        return 'pkl'
    elif filename.endswith('.npz'):
        return 'npz'
    else:
        raise ValueError("Unsupported file type.")

def read_file(filename):
    """Read the file based on its detected type."""
    file_type = detect_file_type(filename)
        
    print("Loading in file")

    if file_type == 'csv':
        file = pd.read_csv(filename, low_memory=False)
    elif file_type == 'xlsx':
        file = pd.read_excel(filename)
    elif file_type == 'parquet':
        file = pd.read_parquet(filename)
    elif file_type == 'pkl.gz':
        with gzip.open(filename, 'rb') as f:
            file = pickle.load(f)
    elif file_type == 'pkl':
        file = BERTopic.load(filename)
    elif file_type == 'npz':
        file = np.load(filename)['arr_0']

        # Embedding files with 'super_compress' in the name were multiplied by 100
        # before saving, so scale them back down here.
        if "compress" in filename:
            file /= 100

    print("File load complete")

    return file
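
# Illustrative usage (kept commented; file names are hypothetical):
#   df = read_file('reviews.parquet')          # -> pandas DataFrame
#   embeddings = read_file('embeddings.npz')   # -> numpy array, rescaled if compressed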

def initial_file_load(in_file):
    '''
    When files are uploaded, update the column dropdown choices and write the
    loaded data, model, embeddings, and labels to the relevant data states.
    '''
    new_choices = []
    concat_choices = []
    custom_labels = pd.DataFrame()
    topic_model = None
    embeddings = np.array([])
    df = pd.DataFrame()
    data_file_name_no_ext = ""

    file_list = [string.name for string in in_file]

    data_file_names = [string for string in file_list if "npz" not in string.lower() and "pkl" not in string.lower() and "topic_list.csv" not in string.lower()]
    if data_file_names:
        data_file_name = data_file_names[0]
        df = read_file(data_file_name)
        data_file_name_no_ext = get_file_path_end(data_file_name)

        new_choices = list(df.columns)
        concat_choices.extend(new_choices)
        output_text = "Data file loaded."
    else:
        error = "No data file provided."
        print(error)
        output_text = error

    model_file_names = [string for string in file_list if "pkl" in string.lower()]
    if model_file_names:
        model_file_name = model_file_names[0]
        topic_model = read_file(model_file_name)
        output_text = "Bertopic model loaded."

    embedding_file_names = [string for string in file_list if "npz" in string.lower()]
    if embedding_file_names:
        embedding_file_name = embedding_file_names[0]
        embeddings = read_file(embedding_file_name)
        output_text = "Embeddings loaded."

    label_file_names = [string for string in file_list if "topic_list" in string.lower()]
    if label_file_names:
        label_file_name = label_file_names[0]
        custom_labels = read_file(label_file_name)
        output_text = "Labels loaded." 
   
        
    # embeddings defaults to np.array([]) above, so the embedding state is
    # cleared whenever a new file is loaded without an accompanying .npz file.
    return gr.Dropdown(choices=concat_choices), gr.Dropdown(choices=concat_choices), df, output_text, topic_model, embeddings, data_file_name_no_ext, custom_labels, df
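
# Illustrative wiring (kept commented; component names are hypothetical). The
# nine outputs match the nine return values above:
#   in_files.upload(initial_file_load, inputs=[in_files],
#                   outputs=[in_colnames, in_colnames_concat, data_state, load_status,
#                            topic_model_state, embeddings_state, file_name_state,
#                            custom_labels_state, original_data_state])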

def custom_regex_load(in_file):
    '''
    When a custom regex file is uploaded, read it into a DataFrame and return a
    status message alongside the loaded patterns.
    '''

    custom_regex = pd.DataFrame()

    file_list = [string.name for string in in_file]

    regex_file_names = [string for string in file_list if "csv" in string.lower()]
    if regex_file_names:
        regex_file_name = regex_file_names[0]
        custom_regex = pd.read_csv(regex_file_name, low_memory=False, header=None)

        output_text = "Custom regex file loaded."
        print(output_text)
    else:
        error = "No regex file provided."
        print(error)
        output_text = error

    return output_text, custom_regex

def get_file_path_end(file_path):
    # Get the basename of the file (e.g., "example.txt" from "/path/to/example.txt")
    basename = os.path.basename(file_path)

    # Split off the extension; note that os.path.splitext only removes the final
    # extension, so "data.csv.gz" becomes "data.csv".
    filename_without_extension, _ = os.path.splitext(basename)

    return filename_without_extension

def get_file_path_end_with_ext(file_path):
    # Strip any leading directory components (forward- or back-slash separated)
    # and return the filename with its extension intact.
    match = re.search(r'(.*[\/\\])?(.+)$', file_path)

    filename_end = match.group(2) if match else ''

    return filename_end
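
# Illustrative usage (kept commented):
#   get_file_path_end('/path/to/data.csv')           # -> 'data'
#   get_file_path_end_with_ext('/path/to/data.csv')  # -> 'data.csv'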

# Zip the above to export file
def zip_folder(folder_path, output_zip_file):
    # Create a ZipFile object in write mode
    with zipfile.ZipFile(output_zip_file, 'w', zipfile.ZIP_DEFLATED) as zipf:
        # Walk through the directory
        for root, dirs, files in os.walk(folder_path):
            for file in files:
                # Create a complete file path
                file_path = os.path.join(root, file)
                # Add file to the zip file
                # The arcname argument sets the archive name, i.e., the name within the zip file
                zipf.write(file_path, arcname=os.path.relpath(file_path, folder_path))
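
# Illustrative usage (kept commented; paths are hypothetical):
#   zip_folder('output/model_dir', 'output/model_dir.zip')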

def delete_files_in_folder(folder_path):
    # Check if the folder exists
    if not os.path.exists(folder_path):
        print(f"The folder {folder_path} does not exist.")
        return

    # Iterate over all files in the folder and remove each
    for filename in os.listdir(folder_path):
        file_path = os.path.join(folder_path, filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
            else:
                print(f"Skipping {file_path} as it is a directory")
        except Exception as e:
            print(f"Failed to delete {file_path}. Reason: {e}")

def save_topic_outputs(topic_model: BERTopic, data_file_name_no_ext: str, output_list: List[str], docs: List[str], save_topic_model: str, prepared_docs: pd.DataFrame, split_sentence_drop: str, output_folder: str = output_folder, progress: gr.Progress = gr.Progress()) -> Tuple[List[str], str]:
    """
    Save the outputs of a topic model to specified files.

    Args:
        topic_model (BERTopic): The topic model object.
        data_file_name_no_ext (str): The base name of the data file without extension.
        output_list (List[str]): List to store the output file names.
        docs (List[str]): List of documents.
        save_topic_model (str): Whether to save the topic model ("Yes" or "No").
        prepared_docs (pd.DataFrame): DataFrame containing prepared documents.
        split_sentence_drop (str): Option to split sentences ("Yes" or "No").
        output_folder (str, optional): Folder to save the output files. Defaults to output_folder.
        progress (gr.Progress, optional): Progress tracker. Defaults to gr.Progress().

    Returns:
        Tuple[List[str], str]: A tuple containing the list of output file names and a status message.
    """

    progress(0.7, desc="Checking data")

    topic_dets = topic_model.get_topic_info()

    if topic_dets.shape[0] == 1:
        topic_det_output_name = output_folder + "topic_details_" + data_file_name_no_ext + "_" + today_rev + ".csv"
        topic_dets.to_csv(topic_det_output_name)
        output_list.append(topic_det_output_name)

        return output_list, "No topics found, original file returned"

    progress(0.8, desc="Saving output")
    
    topic_det_output_name = output_folder + "topic_details_" + data_file_name_no_ext + "_" + today_rev + ".csv"
    topic_dets.to_csv(topic_det_output_name)
    output_list.append(topic_det_output_name)

    doc_det_output_name = output_folder + "doc_details_" + data_file_name_no_ext + "_" + today_rev + ".csv"

    ## Keep only the columns below that actually exist in the document info dataframe
    columns_to_check = ["Document", "Topic", "Name", "Probability", "Representative_document"]

    doc_info = topic_model.get_document_info(docs)
    columns_found = [column for column in columns_to_check if column in doc_info.columns]
    doc_dets = doc_info[columns_found]

    ### If there are full topic probabilities, join these on to the document details df
    def is_valid_dataframe(df):
        """
        Checks if the given object is a non-empty pandas DataFrame.

        Args:
            df: The object to check.

        Returns:
            True if df is a non-empty DataFrame, False otherwise.
        """
        if df is None:  # Check for None first
            return False
        return isinstance(df, pd.DataFrame) and not df.empty
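    # e.g. is_valid_dataframe(None)                      -> False
    #      is_valid_dataframe(pd.DataFrame())            -> False
    #      is_valid_dataframe(pd.DataFrame({'a': [1]}))  -> True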
    
    if is_valid_dataframe(topic_model.probabilities_):
        doc_dets = doc_dets.merge(topic_model.probabilities_, left_index=True, right_index=True, how="left")

    # If you have created a 'sentence split' dataset from the cleaning options, map these sentences back to the original document.
    try:
        if split_sentence_drop == "Yes":
            doc_dets = doc_dets.merge(prepared_docs[['document_index']], how = "left", left_index=True, right_index=True)
            doc_dets = doc_dets.rename(columns={"document_index": "parent_document_index"}, errors='ignore')

            # 1. Group by Parent Document Index:
            grouped = doc_dets.groupby('parent_document_index')

            # 2. Aggregate Topics and Probabilities:
            def aggregate_topics(group):
                original_text = ' '.join(group['Document'])

                # Filter out topics starting with '-1'
                topics = [topic for topic in group['Topic'].tolist() if not str(topic).startswith('-1')]

                if 'Name' in group.columns:
                    # Filter out topic names corresponding to excluded topics
                    topic_names = [name for topic, name in zip(group['Topic'], group['Name'].tolist()) if not str(topic).startswith('-1')]
                else:
                    topic_names = None

                if 'Probability' in group.columns:
                    # Filter out probabilities corresponding to excluded topics
                    probabilities = [prob for topic, prob in zip(group['Topic'], group['Probability'].tolist()) if not str(topic).startswith('-1')]
                else:
                    probabilities = None 

                return pd.Series({'Document': original_text, 'Topic numbers': topics, 'Topic names': topic_names, 'Probabilities': probabilities})


            doc_det_agg = grouped.apply(aggregate_topics).reset_index()

            doc_det_agg_output_name = output_folder + "doc_details_agg_" + data_file_name_no_ext + "_" + today_rev + ".csv"
            doc_det_agg.to_csv(doc_det_agg_output_name)
            output_list.append(doc_det_agg_output_name)

    except Exception as e:
        print("Creating aggregate document details failed, error:", e)

    # Save document details to file
    doc_dets.to_csv(doc_det_output_name)
    output_list.append(doc_det_output_name)


    if "CustomName" in topic_dets.columns:
        topics_text_out_str = str(topic_dets["CustomName"])
    else:
        topics_text_out_str = str(topic_dets["Name"])
    output_text = "Topics: " + topics_text_out_str

    # Save topic model to file
    if save_topic_model == "Yes":
        print("Saving BERTopic model in .pkl format.")

        topic_model_save_name_pkl = output_folder + data_file_name_no_ext + "_topics_" + today_rev + ".pkl"

        topic_model.save(topic_model_save_name_pkl, serialization='pickle', save_embedding_model=False, save_ctfidf=False)

        output_list.append(topic_model_save_name_pkl)

    return output_list, output_text
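
# Illustrative usage (kept commented; topic_model, docs and prepared_df are hypothetical):
#   output_files, status_message = save_topic_outputs(
#       topic_model, "my_data", output_list=[], docs=docs,
#       save_topic_model="Yes", prepared_docs=prepared_df, split_sentence_drop="No")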