dhanvanth183 committed on
Commit
07bd23e
·
1 Parent(s): 91d5d59

Added Local code for Invitation Generator

Browse files
LLM_handler.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from langchain_community.chat_models import ChatOpenAI
3
+ from langchain_groq import ChatGroq
4
+ from dotenv import load_dotenv
5
+
6
+ load_dotenv()
7
+
8
class LLMHandler:
    """Thin wrapper around a chat model from either OpenAI or Groq.

    Reads the provider's API key from the environment (loaded via dotenv at
    module import) and exposes a single text-generation entry point built on
    a structured prompt.
    """

    def __init__(self, model_name="gpt-4", provider="openai"):
        """Create a chat-model client for the given provider.

        Args:
            model_name: Provider-specific model identifier.
            provider: Either "openai" or "groq".

        Raises:
            ValueError: If the provider is unknown, or the provider's API
                key environment variable is not set.
        """
        self.provider = provider

        if provider == "openai":
            self.api_key = os.getenv("OPENAI_API_KEY")
            if not self.api_key:
                raise ValueError("OPENAI_API_KEY environment variable not set.")
            self.model = ChatOpenAI(api_key=self.api_key, model=model_name)

        elif provider == "groq":
            self.api_key = os.getenv("GROQ_API_KEY")
            if not self.api_key:
                raise ValueError("GROQ_API_KEY environment variable not set.")
            self.model = ChatGroq(api_key=self.api_key, model_name=model_name)

        else:
            raise ValueError("Unsupported provider. Use 'openai' or 'groq'.")

    def generate_text(self, input_data, user_instruction):
        """Generate personalized text for one record using the LLM.

        Args:
            input_data: Mapping with keys such as 'Name', 'Job Title',
                'Organization'/'Organisation', 'Area of Interest', 'Category'.
            user_instruction: Free-form instruction prepended to the context.

        Returns:
            The model's text output, or a fixed error string on failure.
        """
        try:
            prompt = self._build_prompt(input_data, user_instruction)
            # BUG FIX: LangChain chat models are called with `.invoke(prompt)`
            # and return a message whose text is `.content`. The previous
            # `self.model.generate([prompt])[0]["text"]` passed a bare string
            # where a list of message lists is expected and indexed the
            # resulting LLMResult as if it were a list of dicts.
            response = self.model.invoke(prompt)
            return response.content
        except Exception as e:
            # Best-effort: report and return a sentinel rather than abort a
            # whole batch run over one bad record.
            print(f"Error during text generation: {e}")
            return "Error generating response"

    @staticmethod
    def _build_prompt(input_data, user_instruction):
        """Build a structured prompt: instruction, then a key/value context block."""
        context = (
            f"Name: {input_data.get('Name')}\n"
            f"Job Title: {input_data.get('Job Title')}\n"
            # Accept both spellings used across the project's CSV inputs.
            f"Organization: {input_data.get('Organization', input_data.get('Organisation'))}\n"
            f"Area of Interest: {input_data.get('Area of Interest')}\n"
            f"Category: {input_data.get('Category')}\n"
        )
        return f"{user_instruction}\n\nContext:\n{context}"
__pycache__/LLM_handler.cpython-311.pyc ADDED
Binary file (3.26 kB). View file
 
__pycache__/llm_merger.cpython-311.pyc ADDED
Binary file (4.91 kB). View file
 
__pycache__/openai_llms.cpython-311.pyc ADDED
Binary file (3.23 kB). View file
 
__pycache__/query_handler.cpython-311.pyc ADDED
Binary file (4.12 kB). View file
 
llm_merger.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from dotenv import load_dotenv
3
+ from langchain_groq import ChatGroq
4
+ from openai import OpenAI
5
+
6
+ load_dotenv()
7
+
8
+
9
class PrimaryLLMHandler:
    """Generates personalized invite texts with an OpenAI chat model."""

    def __init__(self, model_name="gpt-4o-mini"):
        """
        Initializes the Primary LLM Handler (default model: gpt-4o-mini).

        Raises:
            ValueError: If OPENAI_API_KEY is not set in the environment.
        """
        # DOC FIX: previous docstring said "GPT0-mini", which matches no model.
        self.openai_api_key = os.getenv("OPENAI_API_KEY")
        if not self.openai_api_key:
            raise ValueError("OPENAI_API_KEY environment variable not set.")

        self.client = OpenAI(api_key=self.openai_api_key)
        self.model_name = model_name

    def generate_response(self, user_prompt, data):
        """
        Generates a response using the primary LLM.

        Args:
            user_prompt: The user's instruction for tone/purpose.
            data: Mapping with 'Name', 'Job Title', 'Organisation',
                'Area of Interest' and 'Category' keys (KeyError if missing).

        Returns:
            The model's reply text, stripped of surrounding whitespace.
        """
        prompt = (
            f"You are a professional AI model tasked with writing personalized invite texts "
            f"that are concise (less than 40 words), brochure-suitable, and tailored as per the category in the given sample."
            f"\n\n"
            f"User prompt: {user_prompt}\n\n"
            f"Details of the individual:\n"
            f"- Name: {data['Name']}\n"
            f"- Job Title: {data['Job Title']}\n"
            f"- Organisation: {data['Organisation']}\n"
            f"- Area of Interest: {data['Area of Interest']}\n"
            f"- Category: {data['Category']}\n\n"
            # BUG FIX: a separating space was missing here, so the two final
            # directives ran together as "...'Hello X'.Ensure the tone...".
            f"The response should start with 'Hello {data['Name']}'. "
            f"Ensure the tone aligns with the instructions. STRICTLY give only one response."
        )

        completion = self.client.chat.completions.create(
            model=self.model_name,
            messages=[
                {"role": "system", "content": "You are a professional assistant AI."},
                {"role": "user", "content": prompt},
            ],
        )

        return completion.choices[0].message.content.strip()
49
+
50
+
51
class ValidatorLLMHandler:
    # Second-opinion model: validates/corrects invite texts produced by
    # PrimaryLLMHandler, using an independent Groq-hosted model.
    def __init__(self, model_name="gemma2-9b-it"):
        """
        Initializes the Validator LLM Handler (default model: gemma2-9b-it on Groq).

        Raises:
            ValueError: If GROQ_API_KEY is not set in the environment.
        """
        self.groq_api_key = os.getenv("GROQ_API_KEY")
        if not self.groq_api_key:
            raise ValueError("GROQ_API_KEY environment variable not set.")

        self.llm = ChatGroq(groq_api_key=self.groq_api_key, model_name=model_name)

    def validate_and_correct_response(self, user_prompt, original_response, data):
        """
        Validates and corrects the response using the secondary LLM.

        Per the prompt's instructions, the model is expected to reply with the
        literal string 'Valid Response' when it accepts the original text, or
        a corrected invite otherwise — callers should branch on that string.
        """
        validation_prompt = (
            f"You are a professional AI model tasked with validating and correcting AI-generated texts. "
            f"The original response must align strictly with the provided user prompt and input details. "
            f"If the response fails to meet the requirements, generate a corrected version."
            f"\n\n"
            f"User prompt: {user_prompt}\n\n"
            f"Details of the individual:\n"
            f"- Name: {data['Name']}\n"
            f"- Job Title: {data['Job Title']}\n"
            f"- Organisation: {data['Organisation']}\n"
            f"- Area of Interest: {data['Area of Interest']}\n"
            f"- Category: {data['Category']}\n\n"
            f"Original response: {original_response}\n\n"
            f"Instructions:\n"
            f"- If the original response aligns with the user prompt and input details, reply with 'Valid Response'.\n"
            f"- Otherwise, provide a corrected version starting with 'Hello {data['Name']}'.\n"
            f"- Keep it concise (less than 40 words) and brochure-suitable.\n"
        )

        response = self.llm.invoke(validation_prompt)
        return response.content.strip()
main.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import os
3
+ #from query_handler import LLMHandler
4
+ from openai_llms import LLMHandler
5
+
6
+ def main():
7
+ """
8
+ Main function to process input CSV, query LLM, and save responses.
9
+ """
10
+ # Ask user for input CSV file path and user prompt
11
+ #input_csv = input("Enter the path to the input CSV file: ").strip()
12
+ input_csv = "D:\Projects\Liminal\InviteAI\Test_sample.csv"
13
+ if not os.path.exists(input_csv):
14
+ print(f"Error: File '{input_csv}' not found.")
15
+ return
16
+ user_prompt = input("Enter your user prompt: ").strip()
17
+
18
+ # Output CSV file path
19
+ output_csv = "D:\Projects\Liminal\InviteAI\Response_sample.csv"
20
+
21
+ # Check if the input file exists
22
+ if not os.path.exists(input_csv):
23
+ print(f"Error: File '{input_csv}' not found.")
24
+ return
25
+
26
+ # Initialize the LLM handler
27
+ llm_handler = LLMHandler()
28
+ #llm_handler = LLMOpenAI()
29
+
30
+ # Read the input CSV and process each instance
31
+ with open(input_csv, mode="r", newline="", encoding="utf-8") as infile:
32
+ reader = csv.DictReader(infile)
33
+ fieldnames = reader.fieldnames + ["Generated Text"]
34
+
35
+ rows = []
36
+ for row in reader:
37
+ # Generate response for the current row
38
+ try:
39
+ response = llm_handler.generate_response(user_prompt, row)
40
+ row["Generated Text"] = response
41
+ rows.append(row)
42
+ except Exception as e:
43
+ print(f"Error generating response for UID {row.get('UID')}: {e}")
44
+ row["Generated Text"] = "Error generating response"
45
+ rows.append(row)
46
+
47
+ # Save the updated rows to the output CSV
48
+ with open(output_csv, mode="w", newline="", encoding="utf-8") as outfile:
49
+ writer = csv.DictWriter(outfile, fieldnames=fieldnames)
50
+ writer.writeheader()
51
+ writer.writerows(rows)
52
+
53
+ print(f"Responses saved to '{output_csv}'.")
54
+
55
+ if __name__ == "__main__":
56
+ main()
openai_llms.py CHANGED
@@ -1,8 +1,8 @@
1
  from openai import OpenAI
2
- #from dotenv import load_dotenv
3
  import os
4
 
5
- #load_dotenv()
6
 
7
 
8
  class LLMHandler:
 
1
  from openai import OpenAI
2
+ from dotenv import load_dotenv
3
  import os
4
 
5
+ load_dotenv()
6
 
7
 
8
  class LLMHandler:
query_handler.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from langchain_groq import ChatGroq
3
+ from dotenv import load_dotenv
4
+
5
+ load_dotenv()
6
+
7
class LLMHandler:
    """Generates and validates personalized invite texts via Groq-hosted LLMs."""

    def __init__(self, model_name="llama-3.3-70b-versatile"):
        """
        Initializes the LLMHandler with the specified Groq model.

        Raises:
            ValueError: If GROQ_API_KEY is not set in the environment.
        """
        self.groq_api_key = os.getenv("GROQ_API_KEY")
        if not self.groq_api_key:
            raise ValueError("GROQ_API_KEY environment variable not set.")

        # Initialize Groq LLM client
        self.llm = ChatGroq(groq_api_key=self.groq_api_key, model_name=model_name)

    @staticmethod
    def _build_generation_prompt(user_prompt, data):
        """Build the generation prompt (separate so it can be unit-tested)."""
        return (
            "You are a professional AI model tasked with writing personalized invite texts "
            "that are concise (less than 40 words), brochure-suitable, and tailored as per the category in the given sample."
            "\n\n"
            f"Consider the user prompt: {user_prompt}\n\n"
            f"Details of the individual:\n"
            f"- Name: {data['Name']}\n"
            f"- Job Title: {data['Job Title']}\n"
            f"- Organisation: {data['Organisation']}\n"
            f"- Area of Interest: {data['Area of Interest']}\n"
            f"- Category: {data['Category']}\n\n"
            # BUG FIX: newline separators added — the original concatenated
            # these directives with no whitespace, so they ran together as
            # "...'Hello X'.Write a personalized...STRICTLY give...".
            f"The response can start with 'Hello {data['Name']}'.\n"
            "Write a personalized invitation text for this individual, ensuring the tone and purpose align with the user's instructions.\n"
            "STRICTLY give only one response for the category the sample belongs to.\n"
            "Do NOT mention the category in the response.\n"
            "NO PREAMBLE."
        )

    def generate_response(self, user_prompt, data):
        """
        Generate a concise response using the LLM based on user prompt and data.
        """
        prompt = self._build_generation_prompt(user_prompt, data)
        # Query the LLM and return the response
        response = self.llm.invoke(prompt)
        return response.content.strip()

    @staticmethod
    def _build_validation_prompt(user_prompt, original_response, data):
        """Build the validator prompt (separate so it can be unit-tested)."""
        return (
            "You are a professional AI model tasked with validating and correcting AI-generated texts. "
            "The original response must align strictly with the provided user prompt and input details. "
            "If the response fails to meet the requirements, generate a corrected response. "
            "\n\n"
            f"User prompt: {user_prompt}\n\n"
            f"Details of the individual:\n"
            f"- Name: {data['Name']}\n"
            f"- Job Title: {data['Job Title']}\n"
            f"- Organisation: {data['Organisation']}\n"
            f"- Area of Interest: {data['Area of Interest']}\n"
            f"- Category: {data['Category']}\n\n"
            f"Original response: {original_response}\n\n"
            "Instructions:\n"
            "- If the original response is correct, reply with 'Valid Response'.\n"
            # BUG FIX: the bullets below previously lacked '\n' separators
            # and collapsed into a single run-on line in the prompt.
            "- Otherwise, provide a corrected version.\n"
            f"- The corrected version should start with 'Hello {data['Name']}'.\n"
            "- The corrected version is concise (less than 40 words), brochure-suitable, and tailored as per the Category\n"
            "- NO PREAMBLE "
        )

    def validate_and_correct_response(self, user_prompt, original_response, data):
        """
        Use a secondary LLM to validate and correct the response.
        """
        # A fresh validator client per call keeps the method stateless;
        # hoist to __init__ if call volume makes this a bottleneck.
        validator = ChatGroq(
            groq_api_key=self.groq_api_key,
            model_name="gemma2-9b-it",
        )

        validation_prompt = self._build_validation_prompt(user_prompt, original_response, data)

        # Query the validator LLM
        validation_response = validator.invoke(validation_prompt)
        return validation_response.content.strip()