import os
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin
import pandas as pd
import numpy as np
import zipfile
import textract
import gradio as gr
import shutil
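
# Overview of this script (summary of the functions below):
#   1. browse_folder / extract_statuses feed a Gradio UI with sub-folder links and
#      the "TDoc Status" values found in the meeting's status Excel file.
#   2. scrape() downloads the TDoc .zip archives listed in that Excel file (or every
#      .zip linked on the page when no file is given).
#   3. extractZip() unpacks each archive, excel3gpp() fetches the meeting guide
#      spreadsheet, and extractionPrincipale() runs textract over the documents,
#      classifies them (CR, pCR, LS, WID, SID, DISCUSSION, ...) and writes the
#      extracted fields to a per-meeting Excel file.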

def browse_folder(url):
    if url.lower().endswith(('docs', 'docs/')):
        return gr.update(choices=[])
    response = requests.get(url)
    response.raise_for_status()  # This will raise an exception if there's an error

    soup = BeautifulSoup(response.text, 'html.parser')

    excel_links = [a['href'] + '/' for a in soup.find_all('a', href=True) if a['href'].startswith(url)]

    return gr.update(choices=excel_links)
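
# Hypothetical wiring of browse_folder into a Gradio Blocks UI (illustrative only;
# the component names below are assumptions, not part of this file):
#
#   with gr.Blocks() as demo:
#       url_box = gr.Textbox(label="3GPP meeting URL")
#       folder_dd = gr.Dropdown(choices=[], label="Sub-folder")
#       url_box.change(browse_folder, inputs=url_box, outputs=folder_dd)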



def extract_statuses(url):
    # Send a GET request to the webpage
    response = requests.get(url)

    # Parse the webpage content
    soup = BeautifulSoup(response.content, 'html.parser')

    # Find all links in the webpage
    links = soup.find_all('a')

    # Identify and download the Excel file
    for link in links:
        href = link.get('href')
        if href and (href.endswith('.xls') or href.endswith('.xlsx')):
            excel_url = href if href.startswith('http') else url + href
            excel_response = requests.get(excel_url)
            file_name = 'guide_status.xlsx' #excel_url.split('/')[-1]

            # Save the file
            with open(file_name, 'wb') as f:
                f.write(excel_response.content)

            # Read the Excel file
            df = pd.read_excel(file_name)

            # Check if 'TDoc Status' column exists and extract unique statuses
            if 'TDoc Status' in df.columns:
                unique_statuses = df['TDoc Status'].unique().tolist()
                print(f'Downloaded {file_name} and extracted statuses: {unique_statuses}')


                if 'withdrawn' in unique_statuses:
                    unique_statuses.remove('withdrawn')
                return gr.update(choices=unique_statuses, value=unique_statuses)
            else:
                print(f"'TDoc Status' column not found in {file_name}")
                return []

    # No Excel file was found on the page
    return gr.update(choices=[])



def scrape(url, excel_file, folder_name, status_list, progress=gr.Progress()):
    """Download the meeting's TDoc .zip archives into `folder_name`.

    When a status Excel file is provided, only the TDocs whose 'TDoc Status' is in
    `status_list` are downloaded; otherwise every .zip linked on the page is taken.
    Returns (True, number_of_files_to_download).
    """
    filenames = []
    status_filenames = []
    df = pd.DataFrame()  # Initialize df to ensure it's always defined

    # Try to process the Excel file if provided and valid
    if excel_file and os.path.exists(excel_file):
        try:
            df = pd.read_excel(excel_file)
            print(f"Initial DataFrame size: {len(df)}")

            if 'TDoc Status' in df.columns and status_list:
                df = df[df['TDoc Status'].isin(status_list)]
                print(f"Filtered DataFrame size: {len(df)}")

            if not df.empty:
                if 'TDoc' in df.columns and not df['TDoc'].isnull().all():
                    status_filenames = [f"{url}{row['TDoc']}.zip" for index, row in df.iterrows()]
                elif 'URL' in df.columns and not df['URL'].isnull().all():
                    status_filenames = df['URL'].tolist()

                print(f"Filenames from Excel: {status_filenames}")
        except Exception as e:
            print(f"Error reading Excel file: {e}")

    # If no valid Excel file is given or no status_filenames are found, download zip files directly from the URL
    if not excel_file or not status_filenames:
        print("Downloading zip files directly from the URL...")
        response = requests.get(url)
        soup = BeautifulSoup(response.content, 'html.parser')
        zip_links = [a['href'] for a in soup.find_all('a', href=True) if a['href'].endswith('.zip')]

        # Construct absolute URLs for zip files
        status_filenames = [url + link if not link.startswith('http') else link for link in zip_links]
        print(f"Filenames from URL: {status_filenames}")

    download_directory = folder_name
    if not os.path.exists(download_directory):
        os.makedirs(download_directory)

    pourcentss = 0.05

    # Proceed with downloading files
    for file_url in status_filenames:
        filename = os.path.basename(file_url)
        save_path = os.path.join(download_directory, filename)
        progress(pourcentss, desc='Downloading')
        pourcentss += 0.4 / max(len(status_filenames), 1)  # Ensure non-zero division
        try:
            with requests.get(file_url, stream=True) as r:
                r.raise_for_status()
                with open(save_path, 'wb') as f:
                    for chunk in r.iter_content(chunk_size=8192):
                        f.write(chunk)
        except requests.exceptions.HTTPError as e:
            print(f"HTTP error occurred while downloading {file_url}: {e}")

    return True, len(status_filenames)






def extractZip(url):
    # Directory where the content of the zip files will be extracted
    nom_extract = url.split("/")[-3] + "_extraction"
    if os.path.exists(nom_extract):
        shutil.rmtree(nom_extract)
    extract_directory = nom_extract

    # Directory where the zip files were already downloaded
    download_directory = url.split("/")[-3] + "_downloads"

    # Extract the content of every zip file found in the download directory
    for zip_file in os.listdir(download_directory):
        zip_path = os.path.join(download_directory, zip_file)
        # Only process zip files
        if zip_file.endswith(".zip"):
            extract_dir = os.path.join(extract_directory, os.path.splitext(zip_file)[0])  # Strip the .zip extension

            # Check that the zip file exists
            if os.path.exists(zip_path):
                # Create a directory for the extracted content if needed
                if not os.path.exists(extract_dir):
                    os.makedirs(extract_dir)

                # Extract the content of the zip file
                try:
                    with zipfile.ZipFile(zip_path, 'r') as zip_ref:
                        zip_ref.extractall(extract_dir)
                    print(f"Extraction finished for {zip_file}")
                except Exception as e:
                    print(f"Error while extracting {zip_file}: {e}")
            else:
                print(f"Zip file {zip_file} not found")

    print("All extractions are finished!")


def excel3gpp(url):
    response = requests.get(url)
    response.raise_for_status()  # This will raise an exception if there's an error

    # Use BeautifulSoup to parse the HTML content
    soup = BeautifulSoup(response.text, 'html.parser')

    # Look for Excel file links; assuming they have .xlsx or .xls extensions
    excel_links = [a['href'] for a in soup.find_all('a', href=True) if a['href'].endswith(('.xlsx', '.xls'))]

    # Download the first Excel file found (if any)
    if excel_links:
        excel_url = excel_links[0]  # Take the first Excel file found
        if not excel_url.startswith('http'):
            excel_url = urljoin(url, excel_url)  # Handle relative URLs

        # Download the Excel file
        excel_response = requests.get(excel_url)
        excel_response.raise_for_status()

        # Write the content of the Excel file to a local file named 'guide.xlsx'
        nom_guide = 'guide.xlsx'  # Directly specify the filename
        if os.path.exists(nom_guide):
            os.remove(nom_guide)
        filepath = nom_guide

        with open(filepath, 'wb') as f:
            f.write(excel_response.content)
        print(f'Excel file downloaded and saved as: {filepath}')



def replace_line_breaks(text):
    # Encode newlines as the literal "/n" so the text can be searched as a single line
    return text.replace("\n", "/n")

def remod_text(text):
    # Restore the newlines encoded by replace_line_breaks
    return text.replace("/n", "\n")

def update_excel(data, excel_file, url):
    new_df_columns = ["URL", "File", "Type", "Title", "Source", "Status", "Content"]
    temp_df = pd.DataFrame(data, columns=new_df_columns)
    
    try:
        # Check if the Excel file already exists and append data to it
        if os.path.exists(excel_file):
            old_df = pd.read_excel(excel_file)
            df = pd.concat([old_df, temp_df], axis=0, ignore_index=True)
        else:
            df = temp_df

        # Save the updated data back to the Excel file
        df.to_excel(excel_file, index=False)
    except Exception as e:
        print(f"Error updating Excel file: {e}")

def extractionPrincipale(url, excel_file=None, status_list=None, progress=gr.Progress()):
    """End-to-end pipeline: download the TDoc zips, extract them, fetch the meeting
    guide spreadsheet, run textract over each document, classify it, and write the
    result to a '<meeting>_status.xlsx' file whose path is returned."""
    nom_download = url.split("/")[-3] + "_downloads"
    if os.path.exists(nom_download):
        shutil.rmtree(nom_download)
    folder_name = nom_download

    nom_status = url.split("/")[-3] + "_status.xlsx"
    if os.path.exists(nom_status):
        os.remove(nom_status)
    temp_excel = nom_status

    progress(0.0,desc='Downloading')
    
    result, count = scrape(url, excel_file, folder_name, status_list)
    if result:
        print("Success")
    else:
        return None

    progress(0.4,desc='Extraction')
    extractZip(url)
    progress(0.5,desc='Extraction 2')
    excel3gpp(url)
    progress(0.6,desc='Creating Excel File')


    extract_directory = url.split("/")[-3] + "_extraction"
    categories = {
        "Other": ["URL", "File", "Type", "Title", "Source", "Content"],
        "CR": ["URL", "File", "Type", "Title", "Source", "Content"],
        "pCR":["URL", "File", "Type", "Title", "Source", "Content"],
        "LS": ["URL", "File", "Type", "Title", "Source", "Content"],
        "WID": ["URL", "File", "Type", "Title", "Source", "Content"],
        "SID": ["URL", "File", "Type", "Title", "Source", "Content"],
        "DISCUSSION": ["URL", "File", "Type", "Title", "Source", "Content"],
        "pdf": ["URL", "File", "Type", "Title", "Source", "Content"],
        "ppt": ["URL", "File", "Type", "Title", "Source", "Content"],
        "pptx": ["URL", "File", "Type", "Title", "Source", "Content"]
    }

    pourcents2=0.6
    data = []
    errors_count = 0
    processed_count = 0   # Counter for processed files

    pre_title_section = None

    try:
        df = pd.read_excel(temp_excel)
    except Exception as e:
        print(f"Initializing a new DataFrame because: {e}")
        df = pd.DataFrame(columns=["URL", "File", "Type", "Title", "Source", "Status", "Content"])

    for folder in os.listdir(extract_directory):
        folder_path = os.path.join(extract_directory, folder)
        if os.path.isdir(folder_path):
            for file in os.listdir(folder_path):
                progress(min(pourcents2,0.99),desc='Creating Excel File')
                pourcents2+=0.4/count


                if file == "__MACOSX":
                    continue
                file_path = os.path.join(folder_path, file)
                if file.endswith((".pptx", ".ppt", ".pdf", ".docx", ".doc", ".DOCX")):
                    try:
                        text = textract.process(file_path).decode('utf-8')
                    except Exception as e:
                        print(f"Error processing {file_path}: {e}")
                        errors_count += 1
                        continue

                    cleaned_text_lines = text.split('\n')
                    cleaned_text = '\n'.join([line.strip('|').strip() for line in cleaned_text_lines if line.strip()])

                    title = ""
                    debut = ""
                    sections = cleaned_text.split("Title:")
                    if len(sections) > 1:
                        pre_title_section = sections[0].strip().split()
                        title = sections[1].strip().split("\n")[0].strip()
                        debut = sections[0].strip()

                    category = "Other"
                    if file.endswith(".pdf"):
                        category = "pdf"
                    elif file.endswith((".ppt", ".pptx")):
                        category = "ppt"  # assuming all ppt and pptx files go into the same category
                    elif "CHANGE REQUEST" in debut:
                        category = "CR"
                    elif "Discussion" in title:
                        category = "DISCUSSION"
                    elif "WID" in title:
                        category = "WID"
                    elif "SID" in title:
                        category = "SID"
                    elif "LS" in title:
                        category = "LS"
                    elif pre_title_section and pre_title_section[-1] == 'pCR':
                        category = "pCR"
                    elif "Pseudo-CR" in title:
                        category = "pCR"


                    contenu = ""  # This will hold the concatenated content for 'Contenu' column
                    if category in categories:
                        columns = categories[category]
                        extracted_content = []
                        if category == "CR":
                            reason_for_change = ""
                            summary_of_change = ""
                            if len(sections) > 1:
                                reason_for_change = sections[1].split("Reason for change", 1)[-1].split("Summary of change")[0].strip()
                                summary_of_change = sections[1].split("Summary of change", 1)[-1].split("Consequences if not")[0].strip()
                            extracted_content.append(f"Reason for change: {reason_for_change}")
                            extracted_content.append(f"Summary of change: {summary_of_change}")
                        elif category == "pCR":
                            if len(sections) > 1:# Handle 'pCR' category-specific content extraction
                                pcr_specific_content = sections[1].split("Introduction", 1)[-1].split("First Change")[0].strip()
                            extracted_content.append(f"Introduction: {pcr_specific_content}")
                        elif category == "LS":
                            overall_review = ""
                            if len(sections) > 1:
                                overall_review = sections[1].split("Overall description", 1)[-1].strip()
                            extracted_content.append(f"Overall review: {overall_review}")
                        elif category in ["WID", "SID"]:
                            objective = ""
                            start_index = cleaned_text.find("Objective")
                            end_index = cleaned_text.find("Expected Output and Time scale")
                            if start_index != -1 and end_index != -1:
                                objective = cleaned_text[start_index + len("Objective"):end_index].strip()
                            extracted_content.append(f"Objective: {objective}")
                        elif category == "DISCUSSION":
                            Discussion = ""
                            extracted_text = replace_line_breaks(cleaned_text)
                            start_index_doc_for = extracted_text.find("Document for:")
                            if start_index_doc_for != -1:
                                start_index_word_after_doc_for = start_index_doc_for + len("Document for:")
                                end_index_word_after_doc_for = start_index_word_after_doc_for + extracted_text[start_index_word_after_doc_for:].find("/n")
                                word_after_doc_for = extracted_text[start_index_word_after_doc_for:end_index_word_after_doc_for].strip()
                                result_intro = ''
                                result_conclusion = ''
                                result_info = ''
                                if word_after_doc_for.lower() == "discussion":
                                    start_index_intro = extracted_text.find("Introduction")
                                    end_index_intro = extracted_text.find("Discussion", start_index_intro)

                                    intro_text = ""
                                    if start_index_intro != -1 and end_index_intro != -1:
                                        intro_text = extracted_text[start_index_intro + len("Introduction"):end_index_intro].strip()
                                        result_intro = remod_text(intro_text)  # Convert back line breaks
                                    else:
                                        result_intro = "Introduction section not found."

                                    # Attempt to find "Conclusion"
                                    start_index_conclusion = extracted_text.find("Conclusion", end_index_intro)
                                    end_index_conclusion = extracted_text.find("Proposal", start_index_conclusion if start_index_conclusion != -1 else end_index_intro)

                                    conclusion_text = ""
                                    if start_index_conclusion != -1 and end_index_conclusion != -1:
                                        conclusion_text = extracted_text[start_index_conclusion + len("Conclusion"):end_index_conclusion].strip()
                                        result_conclusion = remod_text(conclusion_text)
                                    elif start_index_conclusion == -1:  # Conclusion not found, look for Proposal directly
                                        start_index_proposal = extracted_text.find("Proposal", end_index_intro)
                                        if start_index_proposal != -1:
                                            end_index_proposal = len(extracted_text)  # Assuming "Proposal" section goes till the end if present
                                            proposal_text = extracted_text[start_index_proposal + len("Proposal"):end_index_proposal].strip()
                                            result_conclusion = remod_text(proposal_text)  # Using "Proposal" content as "Conclusion"
                                        else:
                                            result_conclusion = "Conclusion/Proposal section not found."
                                    else:
                                        # Handle case where "Conclusion" exists but no "Proposal" to mark its end
                                        conclusion_text = extracted_text[start_index_conclusion + len("Conclusion"):].strip()
                                        result_conclusion = remod_text(conclusion_text)
                                    Discussion=f"Introduction: {result_intro}\nConclusion/Proposal: {result_conclusion}"
                                elif word_after_doc_for.lower() == "information":
                                    start_index_info = extracted_text.find(word_after_doc_for)
                                    if start_index_info != -1:
                                        info_to_end = extracted_text[start_index_info + len("Information"):].strip()
                                        result_info = remod_text(info_to_end)
                                    Discussion = f"Discussion:{result_info}"
                                else:
                                    Discussion = "The word after 'Document for:' is not 'Discussion', 'DISCUSSION', 'Information', or 'INFORMATION'."
                            else:
                                Discussion = "The phrase 'Document for:' was not found."
                            # Since DISCUSSION category handling requires more specific processing, adapt as necessary
                            # Here's a simplified example
                            discussion_details = Discussion
                            extracted_content.append(discussion_details)
                        # Add more categories as needed
                        contenu = "\n".join(extracted_content)

                    # 'source' and 'status' are filled in below from the guide.xlsx mapping
                    source = ""
                    status = ""
                    data.append([url + "/" + folder + '.zip', folder, category, title, source, status, contenu])

                    guide_file = 'guide.xlsx'
                    if os.path.exists(guide_file):
                        # guide.xlsx exists, so fill in 'Source' and 'Status' from it
                        try:
                            guide_df = pd.read_excel(guide_file, usecols=['Source', 'TDoc', 'TDoc Status'])
                            tdoc_source_map = {row['TDoc']: row['Source'] for index, row in guide_df.iterrows()}
                            tdoc_status_map = {row['TDoc']: row['TDoc Status'] for index, row in guide_df.iterrows()}
                            # Update 'Source' and 'Status' in the collected rows by matching the file name with 'TDoc'
                            for item in data:
                                nom_du_fichier = item[1]  # The file (folder) name is the second element of each row
                                if nom_du_fichier in tdoc_source_map:
                                    item[4] = tdoc_source_map[nom_du_fichier]  # 'Source' is the fifth element
                                    item[5] = tdoc_status_map[nom_du_fichier]  # 'Status' is the sixth element
                        except Exception as e:
                            print(f"An error occurred while processing {guide_file}: {e}")
                    else:
                        print(f"File {guide_file} not found. Skipping operations that require this file.")
                        
                        
                    


                    processed_count += 1

                    # Check if it's time to update the Excel file
                    if processed_count % 20 == 0:
                        update_excel(data, temp_excel, url)
                        print(f"Updated after processing {processed_count} files.")
                        data = []  # Clear the data list after updating

    if data:
        # This final call ensures that any remaining data is processed and saved.
        update_excel(data, temp_excel, url)
        print("Final update after processing all files.")

    file_name = temp_excel
    # Save the updated DataFrame to Excel
    return file_name
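

# Minimal usage sketch for update_excel, showing the row layout the pipeline builds
# (URL, File, Type, Title, Source, Status, Content). The row contents and output
# file name below are illustrative assumptions, not data from a real meeting.
if __name__ == "__main__":
    demo_rows = [
        ["https://example.org/Docs/S2-2400001.zip", "S2-2400001", "CR",
         "Example change request", "Example Company", "agreed",
         "Reason for change: ...\nSummary of change: ..."],
    ]
    update_excel(demo_rows, "demo_status.xlsx", url="https://example.org/Docs/")
    print(pd.read_excel("demo_status.xlsx").head())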