from Bio import Entrez, Medline
#import model
import mtdna_classifier
from NER.html import extractHTML
import data_preprocess
import pipeline
import aiohttp
import asyncio
# Setup
def fetch_ncbi(accession_number):
  try:
    Entrez.email = "your.email@example.com" # Required by NCBI, REPLACE WITH YOUR EMAIL
    handle = Entrez.efetch(db="nucleotide", id=str(accession_number), rettype="gb", retmode="xml")
    record = Entrez.read(handle)
    handle.close()
    outputs = {"authors":"unknown",
              "institution":"unknown",
              "isolate":"unknown",
              "definition":"unknown",
              "title":"unknown",
              "seq_comment":"unknown",
              "collection_date":"unknown" } #'GBSeq_update-date': '25-OCT-2023', 'GBSeq_create-date' 
    gb_seq = None
    # Validate record structure: It should be a list with at least one element (a dict)
    if isinstance(record, list) and len(record) > 0:
        if isinstance(record[0], dict):
            gb_seq = record[0]
        else:
            print(f"Warning: record[0] is not a dictionary for {accession_number}. Type: {type(record[0])}")
            return outputs  # nothing usable to parse; all fields stay "unknown"
        # extract collection date  
        if "GBSeq_create-date" in gb_seq and outputs["collection_date"]=="unknown":
          outputs["collection_date"] = gb_seq["GBSeq_create-date"]
        else:
          if "GBSeq_update-date" in gb_seq and outputs["collection_date"]=="unknown":
            outputs["collection_date"] = gb_seq["GBSeq_update-date"]
        # extract definition
        if "GBSeq_definition" in gb_seq and outputs["definition"]=="unknown":
          outputs["definition"] = gb_seq["GBSeq_definition"]
        # extract related-reference things
        if "GBSeq_references" in gb_seq:
          for ref in gb_seq["GBSeq_references"]:
            # extract authors
            if "GBReference_authors" in ref and outputs["authors"]=="unknown":
              outputs["authors"] = "and ".join(ref["GBReference_authors"])
            # extract title
            if "GBReference_title" in ref and outputs["title"]=="unknown":
              outputs["title"] = ref["GBReference_title"]  
            #  extract submitted journal
            if 'GBReference_journal' in ref and outputs["institution"]=="unknown":
              outputs["institution"] = ref['GBReference_journal']
        # extract seq_comment
        if 'GBSeq_comment' in gb_seq and outputs["seq_comment"]=="unknown":
          outputs["seq_comment"] = gb_seq["GBSeq_comment"]
        # extract isolate
        if "GBSeq_feature-table" in gb_seq:
          if 'GBFeature_quals' in gb_seq["GBSeq_feature-table"][0]:
            for ref in gb_seq["GBSeq_feature-table"][0]["GBFeature_quals"]:
              if ref['GBQualifier_name'] == "isolate" and outputs["isolate"]=="unknown":
                outputs["isolate"] = ref["GBQualifier_value"]
    else:
        print(f"Warning: No valid record or empty record list from NCBI for {accession_number}.")

    # If gb_seq is still None, return defaults
    if gb_seq is None:
        return {"authors":"unknown",
              "institution":"unknown",
              "isolate":"unknown",
              "definition":"unknown",
              "title":"unknown",
              "seq_comment":"unknown",
              "collection_date":"unknown" }
    return outputs   
  except Exception as e:
    print(f"error in fetching NCBI data: {e}")
    return {"authors":"unknown",
              "institution":"unknown",
              "isolate":"unknown",
              "definition":"unknown",
              "title":"unknown",
              "seq_comment":"unknown",
              "collection_date":"unknown" }
# Fallback if the NCBI fetch fails or the accession cannot be found on NCBI
def google_accession_search(accession_id):
    """

    Search for metadata by accession ID using Google Custom Search.

    Falls back to known biological databases and archives.

    """
    queries = [
        f"{accession_id}",
        f"{accession_id} site:ncbi.nlm.nih.gov",
        f"{accession_id} site:pubmed.ncbi.nlm.nih.gov",
        f"{accession_id} site:europepmc.org",
        f"{accession_id} site:researchgate.net",
        f"{accession_id} mtDNA",
        f"{accession_id} mitochondrial DNA"
    ]
    
    links = []
    for query in queries:
        search_results = mtdna_classifier.search_google_custom(query, 2)
        for link in search_results:
            if link not in links:
                links.append(link)
    return links
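
# Example (sketch; assumes mtdna_classifier.search_google_custom is configured with a Google
# Custom Search key, and the accession below is a placeholder):
#   links = google_accession_search("AB123456")
#   print(len(links), "candidate links")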
             
# Method 1: Smarter Google
def smart_google_queries(metadata: dict):
    queries = []

    # Extract useful fields
    isolate = metadata.get("isolate")
    author = metadata.get("authors")
    institution = metadata.get("institution")
    title = metadata.get("title")
    combined = []
    # Construct queries
    if isolate and isolate!="unknown" and isolate!="Unpublished":
        queries.append(f'"{isolate}" mitochondrial DNA')
        queries.append(f'"{isolate}" site:ncbi.nlm.nih.gov')
        
    if author and author!="unknown" and author!="Unpublished":
        # try:
        #   author_name = ".".join(author.split(' ')[0].split(".")[:-1])  # Use last name only
        # except:
        #   try:
        #     author_name = author.split(',')[0]  # Use last name only
        #   except:  
        #     author_name = author
        try:
            author_name = author.split(',')[0]  # Use last name only
        except:  
            author_name = author
        queries.append(f'"{author_name}" mitochondrial DNA')
        queries.append(f'"{author_name}" mtDNA site:researchgate.net')
        
    if institution and institution!="unknown" and institution!="Unpublished":
        try:
          short_inst = ",".join(institution.split(',')[:2])  # Take the first two comma-separated parts of the institution
        except:
          try:
            short_inst = institution.split(',')[0]
          except:
            short_inst = institution
        queries.append(f'"{short_inst}" mtDNA sequence')
        #queries.append(f'"{short_inst}" isolate site:nature.com')
    if title and title not in ("unknown", "Unpublished", "Direct Submission"):
        queries.append(title)
          
    return queries
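
# Example (sketch; the dict mirrors the keys returned by fetch_ncbi, the values are made up):
#   example_meta = {"isolate": "XYZ-01", "authors": "Smith,J. and Doe,A.",
#                   "institution": "Submitted (01-JAN-2020) Some University, Some City",
#                   "title": "Direct Submission"}
#   print(smart_google_queries(example_meta))
#   # -> isolate- and author-based queries; "Direct Submission" titles are skipped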

# def filter_links_by_metadata(search_results, saveLinkFolder, accession=None, stop_flag=None):
#     TRUSTED_DOMAINS = [
#     "ncbi.nlm.nih.gov",
#     "pubmed.ncbi.nlm.nih.gov",
#     "pmc.ncbi.nlm.nih.gov",
#     "biorxiv.org",
#     "researchgate.net",
#     "nature.com",
#     "sciencedirect.com"
#     ]
#     if stop_flag is not None and stop_flag.value:
#         print(f"πŸ›‘ Stop detected {accession}, aborting early...")
#         return []
#     def is_trusted_link(link):
#       for domain in TRUSTED_DOMAINS:
#         if domain in link:
#           return True
#       return False
#     def is_relevant_title_snippet(link, saveLinkFolder, accession=None):
#       output = []
#       keywords = ["mtDNA", "mitochondrial", "accession", "isolate", "Homo sapiens", "sequence"]
#       if accession:
#         keywords = [accession] + keywords
#       title_snippet = link.lower()
#       print("save link folder inside this filter function: ", saveLinkFolder)  
#       success_process, output_process = pipeline.run_with_timeout(data_preprocess.extract_text,args=(link,saveLinkFolder),timeout=60)
#       if stop_flag is not None and stop_flag.value:
#         print(f"πŸ›‘ Stop detected {accession}, aborting early...")
#         return []
#       if success_process:
#           article_text = output_process
#           print("yes succeed for getting article text")
#       else: 
#           print("no suceed, fallback to no link")
#           article_text = ""  
#       #article_text = data_preprocess.extract_text(link,saveLinkFolder)
#       print("article text")
#       #print(article_text)  
#       if stop_flag is not None and stop_flag.value:
#         print(f"πŸ›‘ Stop detected {accession}, aborting early...")
#         return []  
#       try:
#         ext = link.split(".")[-1].lower()
#         if ext not in ["pdf", "docx", "xlsx"]:
#             html = extractHTML.HTML("", link)
#             if stop_flag is not None and stop_flag.value:
#                 print(f"πŸ›‘ Stop detected {accession}, aborting early...")
#                 return []
#             jsonSM = html.getSupMaterial()
#             if jsonSM:
#                 output += sum((jsonSM[key] for key in jsonSM), [])
#       except Exception:
#         pass  # continue silently
#       for keyword in keywords:
#         if keyword.lower() in article_text.lower():
#           if link not in output:
#             output.append([link,keyword.lower()])
#           print("link and keyword for article text: ", link, keyword)    
#           return output
#         if keyword.lower() in title_snippet.lower():
#           if link not in output:
#             output.append([link,keyword.lower()])
#           print("link and keyword for title: ", link, keyword)    
#           return output
#       return output
    
#     filtered = []
#     better_filter = []
#     if len(search_results) > 0:
#       for link in search_results:
#           # if is_trusted_link(link):
#           #   if link not in filtered:
#           #     filtered.append(link)
#           # else:
#           print(link)
#           if stop_flag is not None and stop_flag.value:
#             print(f"πŸ›‘ Stop detected {accession}, aborting early...")
#             return []
#           if link:    
#             output_link = is_relevant_title_snippet(link,saveLinkFolder, accession)
#             print("output link: ")
#             print(output_link)
#             for out_link in output_link:
#               if isinstance(out_link,list) and len(out_link) > 1:
#                 print(out_link)
#                 kw = out_link[1]
#                 print("kw and acc: ", kw, accession.lower())  
#                 if accession and kw == accession.lower():
#                   better_filter.append(out_link[0])
#                 filtered.append(out_link[0])
#               else: filtered.append(out_link)
#           print("done with link and here is filter: ",filtered)      
#     if better_filter:
#       filtered = better_filter      
#     return filtered
async def process_link(session, link, saveLinkFolder, keywords, accession):
    # Returns [[link, matched_keyword, article_text]] when a keyword is found in the article text,
    # [[link, matched_keyword]] when it only matches the link/title, or [] when nothing matches.
    # (The aiohttp session argument is currently unused; data_preprocess manages its own requests.)
    output = []
    title_snippet = link.lower()

    # use async extractor for web, fallback to sync for local files
    if link.startswith("http"):
        article_text = await data_preprocess.async_extract_text(link, saveLinkFolder)
    else:
        article_text = data_preprocess.extract_text(link, saveLinkFolder)

    for keyword in keywords:
        if article_text and keyword.lower() in article_text.lower():
            output.append([link, keyword.lower(), article_text])
            return output
        if keyword.lower() in title_snippet:
            output.append([link, keyword.lower()])
            return output
    return output

async def async_filter_links_by_metadata(search_results, saveLinkFolder, accession=None):
    TRUSTED_DOMAINS = [
        "ncbi.nlm.nih.gov", "pubmed.ncbi.nlm.nih.gov", "pmc.ncbi.nlm.nih.gov",
        "biorxiv.org", "researchgate.net", "nature.com", "sciencedirect.com"
    ]

    keywords = ["mtDNA", "mitochondrial", "accession", "isolate", "Homo sapiens", "sequence"]
    if accession:
        keywords = [accession] + keywords

    filtered, better_filter = {}, {}
    print("before doing session")
    async with aiohttp.ClientSession() as session:
        tasks = []
        for link in search_results:
            if link:
                print("link: ", link)
                tasks.append(process_link(session, link, saveLinkFolder, keywords, accession))
                print("done")
        results = await asyncio.gather(*tasks)
        print("outside session")
    # merge results
    for output_link in results:
        for out_link in output_link:
            if isinstance(out_link, list) and len(out_link) > 1:
                kw = out_link[1]
                # keep the article text when available, otherwise store an empty string
                article = out_link[2] if len(out_link) == 3 else ""
                if accession and kw == accession.lower():
                    # links matched on the accession itself are preferred
                    better_filter[out_link[0]] = article
                filtered[out_link[0]] = article
            else:
                filtered[out_link] = ""

    return better_filter or filtered
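
# Example (sketch; call from synchronous code with asyncio.run; the link, folder path and
# accession are placeholders and data_preprocess must be importable and configured):
#   links = ["https://pubmed.ncbi.nlm.nih.gov/12345678/"]
#   results = asyncio.run(async_filter_links_by_metadata(links, "/tmp/links", accession="AB123456"))
#   print(results)  # dict of link -> article text (or "" when only the title matched)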

def filter_links_by_metadata(search_results, saveLinkFolder, accession=None):
    TRUSTED_DOMAINS = [
    "ncbi.nlm.nih.gov",
    "pubmed.ncbi.nlm.nih.gov",
    "pmc.ncbi.nlm.nih.gov",
    "biorxiv.org",
    "researchgate.net",
    "nature.com",
    "sciencedirect.com"
    ]
    def is_trusted_link(link):
      for domain in TRUSTED_DOMAINS:
        if domain in link:
          return True
      return False
    def is_relevant_title_snippet(link, saveLinkFolder, accession=None):
      output = []
      keywords = ["mtDNA", "mitochondrial", "Homo sapiens"]
      #keywords = ["mtDNA", "mitochondrial"]
      if accession:
        keywords = [accession] + keywords
      title_snippet = link.lower()
      #print("save link folder inside this filter function: ", saveLinkFolder)  
      article_text = data_preprocess.extract_text(link,saveLinkFolder)
      print("article text done")
      #print(article_text)  
      try:
        ext = link.split(".")[-1].lower()
        if ext not in ["pdf", "docx", "xlsx"]:
            html = extractHTML.HTML("", link)
            jsonSM = html.getSupMaterial()
            if jsonSM:
                output += sum((jsonSM[key] for key in jsonSM), [])
      except Exception:
        pass  # continue silently
      for keyword in keywords:
        if article_text:
          if keyword.lower() in article_text.lower():
            if link not in output:
              output.append([link,keyword.lower(), article_text])
            return output
        if keyword.lower() in title_snippet.lower():
          if link not in output:
            output.append([link,keyword.lower()])
          print("link and keyword for title: ", link, keyword)    
          return output
      return output
    
    filtered = {}
    better_filter = {}
    if len(search_results) > 0:
      print(search_results)
      for link in search_results:
          # if is_trusted_link(link):
          #   if link not in filtered:
          #     filtered.append(link)
          # else:
          print(link)
          if link:    
            output_link = is_relevant_title_snippet(link,saveLinkFolder, accession)
            print("output link: ")
            print(output_link)
            for out_link in output_link:
              if isinstance(out_link,list) and len(out_link) > 1:
                print(out_link)
                kw = out_link[1]
                # keep the article text when available, otherwise store an empty string
                article = out_link[2] if len(out_link) == 3 else ""
                if accession and kw == accession.lower():
                  # links matched on the accession itself are preferred
                  better_filter[out_link[0]] = article
                filtered[out_link[0]] = article
              else: filtered[out_link] = ""
          print("done with link and here is filter: ",filtered)      
    if better_filter:
      filtered = better_filter                  
    return filtered

def smart_google_search(metadata):
  queries = smart_google_queries(metadata)
  links = []
  for q in queries:
      #print("\nπŸ” Query:", q)
      results = mtdna_classifier.search_google_custom(q,2)
      for link in results:
          #print(f"- {link}")
          if link not in links:
              links.append(link)
  #filter_links = filter_links_by_metadata(links)
  return links
# Method 2: Prompt an LLM (or a stronger AI search API) with the combined
# information from NCBI and all of the searches above.
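
# Minimal end-to-end sketch (illustrative, not part of the pipeline API): fetch NCBI metadata,
# build smarter Google queries, then filter the returned links. The accession and the folder
# path are placeholders; the Entrez email and the Google Custom Search key must be configured.
if __name__ == "__main__":
    accession = "AB123456"          # placeholder accession
    save_folder = "./link_cache"    # placeholder folder for downloaded link content
    metadata = fetch_ncbi(accession)
    links = smart_google_search(metadata)
    if not links:
        # fall back to a plain accession search if metadata-based queries found nothing
        links = google_accession_search(accession)
    filtered = filter_links_by_metadata(links, save_folder, accession=accession)
    print(filtered)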