File size: 6,520 Bytes
b9c076d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e5c03b7
 
 
b9c076d
 
e5c03b7
b9c076d
e5c03b7
 
 
 
 
 
 
 
 
b9c076d
e5c03b7
 
b9c076d
 
 
 
 
e5c03b7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b9c076d
e5c03b7
 
 
 
 
 
 
 
 
 
b9c076d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
# core/stock_analysis.py

import requests
import json
from datetime import datetime, timedelta
from tradingview_ta import TA_Handler, Interval
from langchain.document_loaders import WebBaseLoader
from langchain.docstore.document import Document
from bs4 import BeautifulSoup
from GoogleNews import GoogleNews
from langchain.prompts import PromptTemplate
from langchain.chains import StuffDocumentsChain, LLMChain

def clean_google_news_url(url: str) -> str:
    """Strip Google News tracking suffixes from an article URL.

    Google News result links often carry ``&ved=...``-style tracking
    parameters appended after the real article path. For URLs whose path
    ends in a known article extension (``.html`` or ``.cms``), everything
    after the extension is dropped; otherwise everything from the first
    ``&`` onward is dropped.

    Args:
        url: Raw link taken from a GoogleNews result. May be ``None`` or
            empty when a result carries no URL.

    Returns:
        The cleaned URL, or ``""`` when *url* is falsy (so callers never
        crash on a missing link).
    """
    if not url:
        # GoogleNews results occasionally lack a "link" key; returning ""
        # lets callers skip the article instead of raising AttributeError.
        return ""
    for ext in (".html", ".cms"):
        if ext in url:
            # Keep the path up to and including the extension.
            return url.split(ext)[0] + ext
    return url.split("&")[0]

def get_google_news_documents(query: str, max_articles: int = 10, timeout: int = 10) -> list:
    """Fetch recent Google News articles for *query* as LangChain Documents.

    Searches Google News over the last two days, downloads each result
    page, and extracts its paragraph text. Articles that fail to download,
    or whose extracted text is 200 characters or shorter (typically
    consent pages or paywall stubs), are skipped silently — the function
    is deliberately best-effort.

    Args:
        query: Search string passed to Google News.
        max_articles: Maximum number of results to attempt to fetch.
        timeout: Per-request timeout in seconds.

    Returns:
        A list of ``Document`` objects with ``source``/``title``/
        ``published``/``link`` metadata; may be empty.
    """
    googlenews = GoogleNews(lang="en")
    end_date = datetime.today()
    # Restrict results to the last two days so the analysis stays current.
    start_date = end_date - timedelta(days=2)
    googlenews.set_time_range(start_date.strftime("%m/%d/%Y"), end_date.strftime("%m/%d/%Y"))
    googlenews.search(query)
    articles = googlenews.result()

    documents = []
    for article in articles[:max_articles]:
        link = article.get("link")
        if not link:
            # Some results carry no URL; skip them instead of crashing
            # inside clean_google_news_url.
            continue
        url = clean_google_news_url(link)
        try:
            response = requests.get(url, timeout=timeout, headers={"User-Agent": "Mozilla/5.0"})
            response.raise_for_status()
            soup = BeautifulSoup(response.text, "html.parser")
            paragraphs = soup.find_all("p")
            content = "\n".join(p.get_text(strip=True) for p in paragraphs if p.get_text(strip=True))
            # Require a minimal amount of text to filter out stub pages.
            if content and len(content) > 200:
                documents.append(
                    Document(
                        page_content=content,
                        metadata={
                            "source": "Google News",
                            "title": article.get("title", ""),
                            "published": article.get("date", ""),
                            "link": url,
                        },
                    )
                )
        except Exception:
            # Best-effort scraping: any network/HTTP/parsing failure just
            # drops this one article.
            continue
    return documents

def analyze_stock(ticker, llm):
    """Produce a structured JSON analysis of an NSE-listed stock.

    Validates *ticker* against TradingView, scrapes Google Finance and
    TradingView pages plus recent Google News articles, and asks *llm*
    (via a stuff-documents chain) to fill a fixed JSON report schema.

    Args:
        ticker: NSE stock symbol, e.g. ``"RELIANCE"``.
        llm: A LangChain-compatible language model instance.

    Returns:
        dict: the parsed JSON report on success; on failure an
        ``{"error": ...}`` dict (with the raw model output under
        ``"raw"`` when JSON parsing fails).
    """
    try:
        handler = TA_Handler(symbol=ticker, screener="india", exchange="NSE", interval=Interval.INTERVAL_1_DAY)
        # The fetched summary is not used directly; the call serves to
        # validate that the ticker exists and data is reachable.
        handler.get_analysis().summary
    except Exception:
        return {"error": "Invalid ticker or failed to fetch trading data"}

    # Public pages that give the LLM price, fundamentals, news and
    # community-sentiment context for the symbol.
    urls = [
        f"https://www.google.com/finance/quote/{ticker}:NSE?hl=en",
        f"https://in.tradingview.com/symbols/NSE-{ticker}/",
        f"https://in.tradingview.com/symbols/NSE-{ticker}/news/",
        f"https://in.tradingview.com/symbols/NSE-{ticker}/minds/"
    ]

    loader = WebBaseLoader(urls)
    web_docs = loader.load()

    news_docs = get_google_news_documents(f"Trending News for {ticker}", max_articles=10)
    docs = web_docs + news_docs

    # Double braces below are literal JSON braces escaped for
    # PromptTemplate; {input_documents} is the only template variable.
    prompt_template = """You are an expert Stock Market Trader specializing in stock market insights derived from fundamental analysis, analytical trends, profit-based evaluations, news indicators from different sites and detailed company financials. 
    Using your expertise, please analyze the stock based on the provided context below.
    
    Context:
    {input_documents}
    
    Task:
    Summarize the stock based on its historical and current data. Keep it CONCISE & BRIEF.
    Evaluate the stock on the following parameters:
    1. Company Fundamentals: Assess the stock's intrinsic value, growth potential, and financial health.
    2. Current & Future Price Trends: Analyze historical price movements and current price trends.
    3. News and Sentiment: Review recent news articles, press releases, and social media sentiment.
    4. Red Flags: Identify any potential risks or warning signs.
    5. Provide a rating for the stock on a scale of 1 to 10.
    6. Advise if the stock is a good buy for the next 1,5, 10 weeks.
    7. Suggest at what price we need to buy and hold or sell the stock

    PROVIDE THE DETAILS based on just the FACTS present in the document
    PROVIDE THE DETAILS IN an JSON Object. Stick to the below JSON object
    {{
      "stock_summary": {{
        "company_name": "",
        "ticker": "",
        "exchange": "",
        "description": "",
        "current_price": "",
        "market_cap": "",
        "historical_performance": {{
          "5_day": "",
          "1_month": "",
          "6_months": "",
          "1_year": "",
          "5_years": ""
        }}
      }},
      "evaluation_parameters": {{
        "company_fundamentals": {{
          "assessment": "",
          "key_metrics": {{
            "pe_ratio": "",
            "volume":"",
            "revenue_growth_yoy": "",
            "net_income_growth_yoy": "",
            "eps_growth_yoy": "",
            "dividend_yield": "",
            "balance_sheet": "",
            "return_on_capital": ""
          }}
        }},
        "current_and_future_price_trends": {{
          "assessment": "",
          "historical_trends": "",
          "current_trends": "",
          "technical_analysis_notes": "",
          "technical_indicators":""
        }},
        "news_and_sentiment": {{
          "assessment": "",
          "positive_sentiment": [
            "",
            "",
            ""
          ],
          "negative_sentiment": [
            "",
            "",
            ""
          ]
        }},
        "red_flags": [
          {{
            "flag": "",
            "details": ""
          }},
          {{
            "flag": "",
            "details": ""
          }},
          {{
            "flag": "",
            "details": ""
          }}
        ]
      }},
      "overall_rating": {{
        "rating": "ranging from 1 to 10, 1 being low rated, 10 being highly rated",
        "justification": ""
      }},
      "investment_advice": {{
        "next_1_weeks_outlook": "",
        "next_5_weeks_outlook": "",
        "next_10_weeks_outlook": "",
        "price_action_suggestions": {{
          "buy": "",
          "hold": "",
          "sell": ""
        }}
      }}
    }}
    """

    prompt = PromptTemplate.from_template(prompt_template)
    chain = StuffDocumentsChain(llm_chain=LLMChain(llm=llm, prompt=prompt), document_variable_name="input_documents")
    response = chain.invoke({"input_documents": docs})
    raw = response["output_text"].strip()

    # Strip a Markdown code fence if the model wrapped its JSON in one.
    # Handles both ```json and a bare ``` opening fence (the original
    # only handled ```json, so bare fences broke JSON parsing).
    for fence in ("```json", "```"):
        if raw.startswith(fence):
            raw = raw[len(fence):]
            break
    if raw.endswith("```"):
        raw = raw[:-3]

    try:
        return json.loads(raw.strip())
    except json.JSONDecodeError:
        # Surface the unparsed output so callers can log or retry.
        return {"error": "Failed to parse model output", "raw": raw}