File size: 1,330 Bytes
5681a09
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
import os
from datetime import datetime

from huggingface_hub import InferenceClient

class LLMHandler:
    """Thin wrapper around the Hugging Face Inference API used to ask an
    instruction-tuned LLM for structured task-deadline suggestions."""

    def __init__(self):
        # HF_TOKEN must be set in the environment; a missing/invalid token
        # surfaces when the client is *used*, not when it is constructed.
        self.client = InferenceClient(
            model="mistralai/Mistral-7B-Instruct-v0.3",  # Updated to v0.3
            token=os.getenv("HF_TOKEN"),
        )

    def get_deadline_suggestion(self, task_description):
        """Ask the model for a structured deadline suggestion.

        Args:
            task_description: Free-form description of the task to schedule.

        Returns:
            str: The model's structured answer (estimated hours, deadline,
            priority, notes), or a human-readable "LLM Error: ..." string if
            the API call fails — no exception propagates to the caller.
        """
        # Bug fix: the model cannot compute a realistic absolute
        # [YYYY-MM-DD HH:MM] deadline without knowing today's date, so
        # anchor the prompt to the current date/time.
        now = datetime.now().strftime("%Y-%m-%d %H:%M")
        prompt = f"""You are a task management assistant. Analyze the task below and provide a realistic deadline suggestion.

Current date and time: {now}

Task Description:
"{task_description}"

Follow this format:
1. **Estimated Hours**: [X]
2. **Recommended Deadline**: [YYYY-MM-DD HH:MM]
3. **Priority**: [High/Medium/Low]
4. **Notes**: [Brief explanation]

Example:
1. **Estimated Hours**: 8
2. **Recommended Deadline**: 2024-04-10 18:00
3. **Priority**: High
4. **Notes**: Research papers typically take 5–7 days for 5000 words.

Now analyze the task and return only the structured output."""

        try:
            # Model was fixed at client construction; low temperature keeps
            # the structured output format stable across calls.
            response = self.client.chat.completions.create(
                messages=[{"role": "user", "content": prompt}],
                max_tokens=500,
                temperature=0.3,
            )
            return response.choices[0].message.content
        except Exception as e:
            # Deliberate broad catch: surface any failure (network, auth,
            # rate limit) as a readable string instead of crashing callers.
            return f"LLM Error: {str(e)}. Please check HF_TOKEN or try again later."

# Module-level singleton shared by importers of this module.
# NOTE(review): constructing it here means InferenceClient is built (and
# HF_TOKEN read) as a side effect of import — confirm eager init is intended.
llm = LLMHandler()