import gradio as gr
import torch
import spaces
import logging
import asyncio
import pandas as pd
from deep_translator import GoogleTranslator
from bs4 import BeautifulSoup
from torch.amp import autocast
from curl_cffi.requests import AsyncSession
from tqdm.asyncio import tqdm
from fake_headers import Headers
from urllib.parse import urlparse, urlunparse
# Limit the number of concurrent workers
CONCURRENT_WORKERS = 5
semaphore = asyncio.Semaphore(CONCURRENT_WORKERS)
# Configure logging to write messages to a file
logging.basicConfig(filename='app.log', level=logging.ERROR)
# Configuration
max_seq_length = 2048
dtype = None # Auto detection of dtype
load_in_4bit = True # Use 4-bit quantization to reduce memory usage
# peft_model_name = "limitedonly41/website_qwen2_7b_2"
# peft_model_name = "limitedonly41/website_mistral7b_v02"
peft_model_name = "unsloth/mistral-7b-instruct-v0.3-bnb-4bit"
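# The fine-tuned adapters above are commented out, so the base 4-bit Mistral model is loaded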
# Initialize model and tokenizer variables
model = None
tokenizer = None
def get_main_page_url(url):
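    """Return just the scheme and domain of a URL, e.g. https://example.com/page -> https://example.com."""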
    try:
        # Parse the given URL
        parsed_url = urlparse(url)
        # Construct the main page URL (scheme + netloc)
        print(parsed_url.netloc)
        main_page_url = urlunparse((parsed_url.scheme, parsed_url.netloc, '', '', '', ''))
        return main_page_url
    except Exception as e:
        return f"Error processing URL: {e}"
def translate_text(text):
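    """Translate text to English with GoogleTranslator, returning None on failure."""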
    try:
        text = text[:4990]  # Limit the text length to avoid API errors
        translated_text = GoogleTranslator(source='auto', target='en').translate(text)
        return translated_text
    except Exception as e:
        print(f"An error occurred during translation: {e}")
        return None
async def get_page_bs4(url: str, headers):
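    """Fetch a page with curl_cffi (impersonating Chrome) and extract title, meta tags, headings, paragraphs and menu links via BeautifulSoup."""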
    # Returned when the page cannot be fetched or parsed
    wrong_result = {
        'url': None,
        'title': None,
        'description': None,
        'keywords': None,
        'h1': None,
        'h2': None,
        'h3': None,
        'paragraphs': None,
        'text': None,
        'links': None
    }
    async with semaphore:  # Limit concurrency
        async with AsyncSession() as session:
            wrong_result['url'] = url
            try:
                response = await session.get(url, headers=headers, impersonate="chrome", timeout=60, verify=False)
            except Exception:
                try:
                    response = await session.get(url, impersonate="chrome", timeout=60, verify=False)
                except Exception:
                    return wrong_result
            if response.status_code != 200:
                return wrong_result
            soup = BeautifulSoup(response.text, "html.parser")
            try:
                title = soup.find('title').text if soup.find('title') else ''
            except Exception:
                title = ''
            try:
                description = soup.find('meta', attrs={'name': 'description'})
                description = description.get("content") if description else ''
            except Exception:
                description = ''
            try:
                keywords = soup.find('meta', attrs={'name': 'keywords'})
                keywords = keywords.get("content") if keywords else ''
            except Exception:
                keywords = ''
            try:
                h1 = " ".join(h.text for h in soup.find_all('h1'))
            except Exception:
                h1 = ''
            try:
                h2 = " ".join(h.text for h in soup.find_all('h2'))
            except Exception:
                h2 = ''
            try:
                h3 = " ".join(h.text for h in soup.find_all('h3'))
            except Exception:
                h3 = ''
            try:
                paragraphs = " ".join(p.text for p in soup.find_all('p'))
            except Exception:
                paragraphs = ''
            try:
                # Collect anchor texts from navigation menus and lists
                menu_tags = []
                navs = soup.find_all('nav')
                uls = soup.find_all('ul')
                ols = soup.find_all('ol')
                for tag in navs + uls + ols:
                    menu_tags.extend(tag.find_all('a'))
                menu_items = [{'text': tag.get_text(strip=True), 'href': tag.get('href')} for tag in menu_tags if tag.get_text(strip=True)]
                all_menu_texts = ', '.join([item['text'] for item in menu_items])
            except Exception:
                all_menu_texts = ''
            # Concatenate the scraped fields; fall back to menu link texts if the result is too short
            all_content = f" {url} {title} {description} {h1} {h2} {h3} {paragraphs} "[:4999]
            if len(all_content) < 150:
                all_content = f" {url} {title} {description} {h1} {h2} {h3} {paragraphs} {all_menu_texts}"[:4999]
            result = {
                'url': url,
                'title': title,
                'description': description,
                'keywords': keywords,
                'h1': h1,
                'h2': h2,
                'h3': h3,
                'paragraphs': paragraphs,
                'text': all_content,
                'links': all_menu_texts
            }
            return result
async def main(urls_list):
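    """Scrape all URLs concurrently under the shared semaphore, rotating fake browser headers every 5 URLs."""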
    headers_list = [Headers(browser="chrome", os="win").generate() for _ in range(len(urls_list) // 5 + 1)]
    tasks = []
    # Assign headers to each task, rotating every 5 URLs
    for i, url in enumerate(urls_list):
        headers = headers_list[i // 5]  # Rotate headers every 5 URLs
        tasks.append(get_page_bs4(url, headers))
    # Use tqdm to show progress
    results = []
    for coro in tqdm(asyncio.as_completed(tasks), total=len(tasks)):
        results.append(await coro)
    return results
def scrape_websites(urls_list):
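    """Run the async scraper from synchronous code, reusing an existing event loop when one is already running."""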
    try:
        # Reuse the already running event loop (e.g. inside Gradio) when there is one
        import nest_asyncio
        nest_asyncio.apply()
        loop = asyncio.get_event_loop()
        result_data = loop.run_until_complete(main(urls_list))
    except RuntimeError:
        result_data = asyncio.run(main(urls_list))
    return result_data
@spaces.GPU()
def classify_website(url):
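    """Scrape the main page of the given URL, translate its text to English, and ask the model to describe the website topic."""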
    from unsloth import FastLanguageModel  # Imported inside the function so unsloth is only loaded when a request is handled
    global model, tokenizer  # Declare model and tokenizer as global variables
    if model is None or tokenizer is None:
        # Load the model and tokenizer on first use
        model, tokenizer = FastLanguageModel.from_pretrained(
            model_name=peft_model_name,
            max_seq_length=max_seq_length,
            dtype=dtype,
            load_in_4bit=load_in_4bit,
        )
        FastLanguageModel.for_inference(model)  # Enable native 2x faster inference
    main_page_url = get_main_page_url(url)
    urls = [main_page_url]
    print('before scrape_websites')
    result_data = scrape_websites(urls)
    data = result_data[0]
    url = data['url']
    text = data['text']
    # Bail out early if the page could not be scraped or yielded too little text
    if text is None:
        return 'NotScraped'
    if len(text) < 150:
        return 'Short'
    translated = translate_text(text)
    if translated is None:
        return 'NotScraped'
    if len(translated) < 150:
        return 'Short'
example_input = """https://extensionesdepelo.net/ Hair extensions in Valencia ▶ The best prices for natural hair extensions in Valencia Hair Extensions in Valencia ▶ Professional and Natural ⭐ Hair with more volume and length. Perfect Hair Extensions About us Our works Our salon services Hair extensions Hair removal Reviews of satisfied customers Hair palette colors Contacts Fill out the form Over 7 years of experience in hair extensions, we select the color and texture of hair to match your hair so that the hair extensions look natural Gentle and safe hair extensions so that your hair does not suffer. In a few hours, we will transform rare, weak and short hair into luxurious long and healthy hair. We work exclusively with high-quality hair. Thanks to micro and nano capsules, the extensions will be invisible and comfortable. Free consultation before each extension. We use high-quality hair, time-tested
We use small, neat, comfortable
capsules and make an unnoticeable transition
We consult
and answer all
questions before and after extensions
Safe extensions without discomfort in wearing. Due to the correct placement of the capsules, the result of the extension is invisible.  A procedure that requires the attention and accuracy of the master. With proper hair removal, the structure of native hair is not damaged We provide a large selection of colors Ask the master a question and we will answer all your questions We work in the hot Italian extension technique. This technique is the most comfortable because it does not require much self-care. We recommend doing a correction every 2-3 months. With the Italian technique, you can do various hairstyles and even make ponytails. To form capsules, we use good refractory keratin.  We work with a proven supplier of natural Slavic hair. We have a large selection of colors, lengths and hair structures."""
alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
### Instruction:
Describe the topic of website from its text :
### ExampleInput:
{}
### ExampleResponse: The website of the master of hair extensions.
### Input:
{}
### Response:"""
    prompt = alpaca_prompt.format(example_input, translated)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    with autocast(device_type=device.type):
        inputs = tokenizer(prompt, return_tensors="pt").to(device)
        outputs = model.generate(**inputs, max_new_tokens=128, use_cache=True)

    summary = tokenizer.decode(outputs[0], skip_special_tokens=True)
    final_answer = summary.split("### Response:")[1].strip()
    return f"{main_page_url}: {final_answer}"
# Create a Gradio interface
iface = gr.Interface(
    fn=classify_website,
    inputs="text",
    outputs="text",
    title="Website Topic",
    description="Enter a URL to get a topic summary of the website content."
)
# Launch the interface
iface.launch()