#####################################
##  LLM setup (HuggingFaceHub)
#####################################

from langchain_community.llms import HuggingFaceHub

llm_zephyr_7b_beta = HuggingFaceHub(
    repo_id="HuggingFaceH4/zephyr-7b-beta",
    task="text-generation",
    model_kwargs={
        "max_new_tokens": 512,
        "top_k": 30,
        "temperature": 0.1,
        "repetition_penalty": 1.03,
    },
)
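
# Note: HuggingFaceHub calls the hosted Inference API, so a valid
# HUGGINGFACEHUB_API_TOKEN must be available in the environment, e.g.
# (the token value below is a placeholder):
#
#   os.environ["HUGGINGFACEHUB_API_TOKEN"] = "hf_..."  # placeholder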

import os
from crewai import Agent, Task, Crew, Process
from crewai_tools import SerperDevTool

search_tool = SerperDevTool()
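
# SerperDevTool reads its key from the SERPER_API_KEY environment variable
# (the value below is a placeholder):
#
#   os.environ["SERPER_API_KEY"] = "your-serper-key"  # placeholder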

# Define your agents with roles and goals
researcher = Agent(
  role='Senior Research Analyst',
  goal='Uncover cutting-edge developments in AI and data science',
  backstory="""You work at a leading tech think tank.
  Your expertise lies in identifying emerging trends.
  You have a knack for dissecting complex data and presenting actionable insights.""",
  verbose=True,
  allow_delegation=False,
  tools=[search_tool],
  llm=llm_zephyr_7b_beta
  # You can pass an optional llm attribute specifying which model you want to use.
  # It can be a local model through Ollama / LM Studio or a remote
  # model like OpenAI, Mistral, Anthropic or others (https://docs.crewai.com/how-to/LLM-Connections/)
  #
  # import os
  # os.environ['OPENAI_MODEL_NAME'] = 'gpt-3.5-turbo'
  #
  # OR
  #
  # from langchain_openai import ChatOpenAI
  # llm=ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.7)
)

writer = Agent(
  role='Tech Content Strategist',
  goal='Craft compelling content on tech advancements',
  backstory="""You are a renowned Content Strategist, known for your insightful and engaging articles.
  You transform complex concepts into compelling narratives.""",
  verbose=True,
  allow_delegation=True,
  llm=llm_zephyr_7b_beta
)

# Create tasks for your agents
task1 = Task(
  description="""Conduct a comprehensive analysis of the latest advancements in AI in 2024.
  Identify key trends, breakthrough technologies, and potential industry impacts.""",
  expected_output="Full analysis report in bullet points",
  agent=researcher
)

task2 = Task(
  description="""Using the insights provided, develop an engaging blog
  post that highlights the most significant AI advancements.
  Your post should be informative yet accessible, catering to a tech-savvy audience.
  Make it sound cool, avoid complex words so it doesn't sound like AI.""",
  expected_output="Full blog post of at least 4 paragraphs",
  agent=writer
)

# Instantiate your crew with a sequential process
crew = Crew(
  agents=[researcher, writer],
  tasks=[task1, task2],
  process=Process.sequential,  # tasks run in order; task2 builds on task1's output
  verbose=2, # set to 1 or 2 for different logging levels
)

# Get your crew to work!
#result = crew.kickoff()

#print("######################")
#print(result)

##################
###### other models:
# "Trelis/Llama-2-7b-chat-hf-sharded-bf16"
# "bn22/Mistral-7B-Instruct-v0.1-sharded"
# "HuggingFaceH4/zephyr-7b-beta"

# Load the LLM used by the retrieval chains below: a remote Mixtral
# endpoint via HuggingFaceHub (nothing is quantized or loaded locally here).
def load_model():
    model = HuggingFaceHub(
        repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
        model_kwargs={"max_length": 1048, "temperature": 0.2, "max_new_tokens": 256, "top_p": 0.95, "repetition_penalty": 1.0},
    )
    return model
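
# Usage sketch (assumes HUGGINGFACEHUB_API_TOKEN is set, see above):
#
#   llm = load_model()
#   print(llm.invoke("Say hello in one sentence."))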

##################################################
## Vector-store chat (RAG)
##################################################
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, pipeline  # kept from a local-quantization variant; unused below

from langchain_core.messages import AIMessage, HumanMessage
from langchain_community.document_loaders import WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Chroma

from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain_community.vectorstores import FAISS  # only referenced in the commented FAISS variant below


from dotenv import load_dotenv
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.chains import create_history_aware_retriever, create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain


load_dotenv()

def get_vectorstore():
    '''
    Return the Chroma vector store persisted on disk, using BGE embeddings
    so queries are embedded the same way as the stored chunks.
    '''
    model = "BAAI/bge-base-en-v1.5"
    encode_kwargs = {
        "normalize_embeddings": True
    }  # normalize embeddings so similarity scores are cosine similarity
    embeddings = HuggingFaceBgeEmbeddings(
        model_name=model, encode_kwargs=encode_kwargs, model_kwargs={"device": "cpu"}
    )
    # load from disk
    vector_store = Chroma(persist_directory="/home/user/.cache/chroma_db", embedding_function=embeddings)
    return vector_store

def get_vectorstore_from_url(url):
    '''
    Fetch a web page, split it into chunks, embed the chunks, and persist
    them into the on-disk Chroma collection. Returns the vector store.
    '''
    # get the text in document form
    loader = WebBaseLoader(url)
    document = loader.load()

    # split the document into chunks
    text_splitter = RecursiveCharacterTextSplitter()
    document_chunks = text_splitter.split_documents(document)

    # embed with the same BGE model used in get_vectorstore()
    model = "BAAI/bge-base-en-v1.5"
    encode_kwargs = {
        "normalize_embeddings": True
    }  # normalize embeddings so similarity scores are cosine similarity
    embeddings = HuggingFaceBgeEmbeddings(
        model_name=model, encode_kwargs=encode_kwargs, model_kwargs={"device": "cpu"}
    )

    # alternative in-memory backend:
    #vector_store = FAISS.from_documents(document_chunks, embeddings)
    vector_store = Chroma.from_documents(document_chunks, embeddings, persist_directory="/home/user/.cache/chroma_db")

    all_documents = vector_store.get()['documents']
    total_records = len(all_documents)
    print("Total records in the collection: ", total_records)

    return vector_store
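
# Usage sketch (URL is illustrative):
#
#   vs = get_vectorstore_from_url("https://example.com/docs")
#   hits = vs.similarity_search("What does the product do?", k=3)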

def get_context_retriever_chain(vector_store): 

    llm = load_model( )
    
    retriever = vector_store.as_retriever()
    
    prompt = ChatPromptTemplate.from_messages([
      MessagesPlaceholder(variable_name="chat_history"),
      ("user", "{input}"),
      ("user", "Given the above conversation, generate a search query to look up in order to get information relevant to the conversation")
    ])
    
    retriever_chain = create_history_aware_retriever(llm, retriever, prompt)
    
    return retriever_chain
    
def get_conversational_rag_chain(retriever_chain): 
    
    llm = load_model( )
    
    prompt = ChatPromptTemplate.from_messages([
      ("system", "You are a friendly call-center employee named Susie. Answer based on the context and use only its content. Include the source when possible. If the answer cannot be found in the context, reply: 'Ich bin mir nicht sicher.' Please answer in German. CONTEXT:\n\n{context}"),
      MessagesPlaceholder(variable_name="chat_history"),
      ("user", "{input}"),
    ])
    
    stuff_documents_chain = create_stuff_documents_chain(llm, prompt)
    
    return create_retrieval_chain(retriever_chain, stuff_documents_chain)
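
# Invocation sketch (messages are illustrative):
#
#   vs = get_vectorstore()
#   chain = get_conversational_rag_chain(get_context_retriever_chain(vs))
#   out = chain.invoke({
#       "chat_history": [HumanMessage(content="Hallo"), AIMessage(content="Hallo!")],
#       "input": "Was bietet ihr an?",
#   })
#   print(out["answer"])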


##################################################
## Gradio front end
##################################################
import gradio as gr


# get_response(message, history) is defined further below; it is shared by
# the FastAPI endpoint and the Gradio ChatInterface.


##################################################
## FastAPI app
##################################################
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

app = FastAPI()

# middleware to allow cross-origin requests
app.add_middleware(
    CORSMiddleware,
    allow_origins=['*'], 
    allow_credentials=True, 
    allow_methods=['*'], 
    allow_headers=['*'],
)


@app.post("/generate/")
def generate(user_input: str, history=None):
    # avoid a mutable default argument; normalize missing history to []
    return get_response(user_input, history or [])
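
# Request sketch: user_input is a plain function parameter, so FastAPI
# expects it as a query parameter (endpoint shape assumed):
#
#   curl -X POST "http://localhost:8000/generate/?user_input=Hallo"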
##################

def history_to_dialog_format(chat_history: list[list[str]]):
    # Gradio's ChatInterface passes history as [[user, assistant], ...] pairs;
    # flatten them into role-tagged messages.
    dialog = []
    for pair in chat_history:
        for idx, message in enumerate(pair):
            role = "user" if idx % 2 == 0 else "assistant"
            dialog.append({
                "role": role,
                "content": message,
            })
    return dialog
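
# Example (illustrative values):
#
#   history_to_dialog_format([["Hi", "Hello!"]])
#   # -> [{'role': 'user', 'content': 'Hi'},
#   #     {'role': 'assistant', 'content': 'Hello!'}]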

def get_response(message, history):
    dialog = history_to_dialog_format(history)
    dialog.append({"role": "user", "content": message})

    vs = get_vectorstore()

    # the chain currently runs stateless: history is cleared, so every turn
    # is answered from the retrieved context alone
    history = []
    retriever_chain = get_context_retriever_chain(vs)
    conversation_rag_chain = get_conversational_rag_chain(retriever_chain)

    response = conversation_rag_chain.invoke({
        "chat_history": history,
        "input": message + " Assistant: ",
    })
    res = response['answer']
    # keep only the text after the final " Assistant: " marker
    parts = res.split(" Assistant: ")
    last_part = parts[-1]
    return last_part

##################################################
## Simple same-domain crawler
##################################################
import requests
from bs4 import BeautifulSoup
from urllib.parse import urlparse, urljoin

def get_links_from_page(url, visited_urls, domain_links):
    # skip already-visited pages and cap the crawl at 25 pages
    if url in visited_urls:
        return
    if len(visited_urls) > 25:
        return

    visited_urls.add(url)
    print(url)
    response = requests.get(url)

    if response.status_code == 200:
        soup = BeautifulSoup(response.content, 'html.parser')
        base_url = urlparse(url).scheme + '://' + urlparse(url).netloc
        links = soup.find_all('a', href=True)

        for link in links:
            href = link.get('href')
            absolute_url = urljoin(base_url, href)
            parsed_url = urlparse(absolute_url)

            if parsed_url.netloc == urlparse(url).netloc:
                domain_links.add(absolute_url)
                get_links_from_page(absolute_url, visited_urls, domain_links)

    else:
        print(f"Failed to retrieve content from {url}. Status code: {response.status_code}")

def get_all_links_from_domain(domain_url):
    visited_urls = set()
    domain_links = set()
    get_links_from_page(domain_url, visited_urls, domain_links)
    return domain_links
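
# Usage sketch (domain is illustrative):
#
#   links = get_all_links_from_domain("https://example.com/")
#   print(len(links), "internal links found")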





def simple(text: str):
    # trivial echo used to smoke-test the ChatInterface wiring
    return text + " hhhmmm "

fe_app = gr.ChatInterface(
    fn=get_response,
    #fn=simple,  # swap in to test the UI without the RAG chain
    title="Chat with Websites",
    description="Type your question here...",
    retry_btn=None,
    undo_btn=None,
    clear_btn=None,
)

#fe_app.launch(debug=True, share=True) 



# run once on startup: kick off the crew (the commented lines below are
# earlier experiments with crawling a domain and pre-building the vector store)
@app.on_event("startup")
async def startup():
    # Get your crew to work!
    result = crew.kickoff()

    print("######################")
    print(result)
    #domain_url = 'https://globl.contact/'
    #links = get_all_links_from_domain(domain_url)
    #print("Links from the domain:", links)
    
    #########
    # Assuming visited_urls is a list of URLs
    #for url in links:
    #    vs = get_vectorstore_from_url(url)
    #load_model()