from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from playwright.async_api import async_playwright

app = FastAPI()
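# Allow all origins during development. Note: the CORS spec does not permit a
# wildcard origin together with credentials, so production deployments should
# list explicit origins instead of "*".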
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


async def scrape_links(url: str):
    async with async_playwright() as p:
        browser = await p.chromium.launch(headless=True)
        page = await browser.new_page()

        # Speed up loading by allowing only documents and scripts; note this also
        # blocks XHR/fetch requests, which can prevent some dynamic content from loading
        await page.route("**/*", lambda route: route.continue_() if route.request.resource_type in ["document", "script"] else route.abort())

        # Open the target page (originally hardcoded to the Motley Fool Tesla
        # Q4 2023 earnings-call transcript; see the standalone example below)
        await page.goto(url, wait_until='domcontentloaded')

        # Give dynamically loaded content a moment to settle;
        # wait_for_timeout takes milliseconds, so the original value of 10 was too short
        await page.wait_for_timeout(1000)

        # Extract all links
        links = await page.query_selector_all('a')
        result = []
        for link in links:
            href = await link.get_attribute('href')
            result.append({'href': href})

        # Extract the text of every element under <body>; note that nested
        # elements repeat their ancestors' text, so the output contains duplicates
        elements = await page.query_selector_all('body *')

        for element in elements:
            text_content = await element.text_content()
            if text_content and text_content.strip():
                result.append({'text': text_content.strip()})

        await browser.close()
        return result
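
# A quick standalone check of the scraper outside FastAPI (a sketch, assuming
# this file is run directly as a script; the URL is the transcript page this
# file originally targeted):
#
#     import asyncio
#     asyncio.run(scrape_links("https://www.fool.com/earnings/call-transcripts/2024/01/24/tesla-tsla-q4-2023-earnings-call-transcript/"))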



@app.post("/get_webscraped_data")
async def get_webscraped_data(url: str):
    # The endpoint already runs inside FastAPI's event loop, so the scraper can
    # be awaited directly; no nested asyncio.run() (and no nest_asyncio) is needed
    results = await scrape_links(url)
    print(results)
    return results
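

# A minimal sketch of how to serve and call this API, assuming uvicorn is
# installed and this file is named main.py (both assumptions, not stated in
# the original):
#
#     uvicorn main:app --port 8000
#     curl -X POST "http://localhost:8000/get_webscraped_data?url=https://example.com"
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="127.0.0.1", port=8000)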