import concurrent.futures
import json
import logging
import random
from concurrent.futures import ThreadPoolExecutor
from io import BytesIO

import requests
from langchain_community.document_loaders import WebBaseLoader
from langchain_google_genai import ChatGoogleGenerativeAI
from langdetect import detect_langs
from pymongo import MongoClient
from PyPDF2 import PdfReader
# Mongo Connections
# srv_connection_uri = "mongodb+srv://adityasm1410:uOh6i11AYFeKp4wd@patseer.5xilhld.mongodb.net/?retryWrites=true&w=majority&appName=Patseer"
# client = MongoClient(srv_connection_uri)
# db = client['embeddings']
# collection = db['data']
# API URLs -----
# main_url = "http://127.0.0.1:5000/search/all"
main_url = "http://127.0.0.1:8000/search/all"
# main_product = "Samsung Galaxy s23 ultra"
# Relevance Checking Models -----
gemini = ChatGoogleGenerativeAI(model="gemini-1.0-pro-001", google_api_key='AIzaSyCo-TeDp0Ou--UwhlTgMwCoTEZxg6-v7wA', temperature=0.1)
gemini1 = ChatGoogleGenerativeAI(model="gemini-1.0-pro-001", google_api_key='AIzaSyAtnUk8QKSUoJd3uOBpmeBNN-t8WXBt0zI', temperature=0.1)
gemini2 = ChatGoogleGenerativeAI(model="gemini-1.0-pro-001", google_api_key='AIzaSyBzbZQBffHFK3N-gWnhDDNbQ9yZnZtaS2E', temperature=0.1)
gemini3 = ChatGoogleGenerativeAI(model="gemini-1.0-pro-001", google_api_key='AIzaSyBNN4VDMAOB2gSZha6HjsTuH71PVV69FLM', temperature=0.1)
API_URL = "https://api-inference.huggingface.co/models/google/flan-t5-xxl"
headers = {"Authorization": "Bearer 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"}
# Logging setup
logging.basicConfig(level=logging.INFO)
# Global Vars --------
data = False
seen = set()
existing_products_urls = set()  # URLs of already-processed products; accepted without re-checking
def get_links(main_product, api_key):
    # Query the search API and cache the raw results in data.json.
    params = {
        "API_KEY": api_key,
        "product": main_product,
    }
    # Flask
    response = requests.get(main_url, params=params)
    # FastAPI
    # response = requests.post(main_url, json=params)
    if response.status_code == 200:
        results = response.json()
        with open('data.json', 'w') as f:
            json.dump(results, f)
    else:
        print(f"Failed to fetch results: {response.status_code}")
def language_preprocess(text):
    # Accept only English text; langdetect raises on empty or undetectable input.
    try:
        return detect_langs(text)[0].lang == 'en'
    except Exception:
        return False
def relevant(product, similar_product, content):
    # Ask one of the Gemini models whether the scraped content matches both products.
    prompt = (
        f"Do you think that the given content is similar to {similar_product} and {product}, "
        f"just Respond True or False \nContent for similar product: {content}"
    )
    try:
        # Hugging Face fallback (currently disabled):
        # payload = {"inputs": prompt}
        # response = requests.post(API_URL, headers=headers, json=payload)
        # output = response.json()
        # return bool(output[0]['generated_text'])
        model = random.choice([gemini, gemini1, gemini2, gemini3])
        result = model.invoke(prompt)
        # result is an AIMessage, so bool(result) is always True; parse its text instead.
        return 'true' in result.content.strip().lower()
    except Exception:
        return False
def download_pdf(url, timeout=10):
    try:
        response = requests.get(url, timeout=timeout)
        response.raise_for_status()
        return BytesIO(response.content)
    except requests.RequestException as e:
        logging.error(f"PDF download error: {e}")
        return None
def extract_text_from_pdf(pdf_file, pages):
    reader = PdfReader(pdf_file)
    extracted_text = ""
    num_pages = len(reader.pages)
    try:
        for page_num in pages:
            if page_num < num_pages:
                page = reader.pages[page_num]
                # extract_text() may return None for image-only pages.
                extracted_text += (page.extract_text() or "") + "\n"
            else:
                print(f"Page {page_num} does not exist in the document.")
        return extracted_text
    except Exception:
        # Extraction failed; return an empty string so the caller rejects the link.
        return ""
def extract_text_online(link):
    # Load the web page and keep the first three text chunks.
    loader = WebBaseLoader(link)
    pages = loader.load_and_split()
    text = ''
    for page in pages[:3]:
        text += page.page_content
    return text
def process_link(link, main_product, similar_product):
    if link in seen:
        return None
    seen.add(link)
    try:
        # Treat markdown files and English-language sites ("https://en.") as web pages;
        # everything else is assumed to be a PDF.
        if link.endswith('.md') or link[8:11] == 'en.':
            text = extract_text_online(link)
        else:
            pdf_file = download_pdf(link)
            if pdf_file is None:
                raise ValueError("PDF download failed")
            text = extract_text_from_pdf(pdf_file, [0, 2, 4])
        if language_preprocess(text):
            if relevant(main_product, similar_product, text):
                print("Accepted -", link)
                return link
    except Exception:
        pass
    print("Rejected -", link)
    return None
def filtering(urls, main_product, similar_product, link_count):
    res = []
    # print(f"Filtering Links of ---- {similar_product}")
    # Parallel variant (currently disabled):
    # with ThreadPoolExecutor() as executor:
    #     futures = {executor.submit(process_link, link, main_product, similar_product): link for link in urls}
    #     for future in concurrent.futures.as_completed(futures):
    #         result = future.result()
    #         if result is not None:
    #             res.append(result)
    #     return res
    count = 0
    print(f"--> Filtering Links of - {similar_product}")
    for link in urls:
        if link in existing_products_urls:
            # Already-known URLs are accepted immediately and flagged with 1.
            res.append((link, 1))
            count += 1
        else:
            result = process_link(link, main_product, similar_product)
            if result is not None:
                res.append((result, 0))
                count += 1
        if count == link_count:
            break
    return res
# Main Functions -------------------------------------------------->
# get_links()
# preprocess()
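# Usage sketch -- an illustrative assumption, not part of the pipeline above.
# The API key, product names, and sample URLs are hypothetical placeholders
# showing how get_links() and filtering() are expected to compose.
if __name__ == "__main__":
    main_product = "Samsung Galaxy s23 ultra"
    get_links(main_product, "YOUR_SEARCH_API_KEY")  # writes raw search results to data.json
    sample_urls = [
        "https://en.wikipedia.org/wiki/Samsung_Galaxy_S23",
        "https://example.com/galaxy-s22-spec-sheet.pdf",
    ]
    accepted = filtering(sample_urls, main_product, "Samsung Galaxy s22 ultra", link_count=1)
    print(accepted)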