code
stringlengths
141
78.9k
apis
sequencelengths
1
23
extract_api
stringlengths
142
73.2k
import re from langchain.agents import AgentOutputParser from langchain.schema import AgentAction, AgentFinish, OutputParserException from typing import Union from cat.mad_hatter.mad_hatter import MadHatter from cat.log import log class ChooseProcedureOutputParser(AgentOutputParser): def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]: # Check if agent should finish if "Final Answer:" in llm_output: return AgentFinish( # Return values is generally always a dictionary with a single `output` key # It is not recommended to try anything else at the moment :) return_values={"output": llm_output.split("Final Answer:")[-1].strip()}, log=llm_output, ) # Parse out the action and action input regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)" match = re.search(regex, llm_output, re.DOTALL) if not match: raise OutputParserException(f"Could not parse LLM output: `{llm_output}`") # Extract action action = match.group(1).strip() action_input = match.group(2) if action == "none_of_the_others": return AgentFinish( # Return values is generally always a dictionary with a single `output` key # It is not recommended to try anything else at the moment :) return_values={"output": None}, log=llm_output, ) mh = MadHatter() for Form in mh.forms: if Form.name == action: return AgentFinish( return_values={ "output": None, "form": action }, log=llm_output, ) # Return the action and action input return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output)
[ "langchain.schema.AgentFinish", "langchain.schema.OutputParserException" ]
[((936, 975), 're.search', 're.search', (['regex', 'llm_output', 're.DOTALL'], {}), '(regex, llm_output, re.DOTALL)\n', (945, 975), False, 'import re\n'), ((1551, 1562), 'cat.mad_hatter.mad_hatter.MadHatter', 'MadHatter', ([], {}), '()\n', (1560, 1562), False, 'from cat.mad_hatter.mad_hatter import MadHatter\n'), ((1016, 1084), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Could not parse LLM output: `{llm_output}`"""'], {}), "(f'Could not parse LLM output: `{llm_output}`')\n", (1037, 1084), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n'), ((1260, 1319), 'langchain.schema.AgentFinish', 'AgentFinish', ([], {'return_values': "{'output': None}", 'log': 'llm_output'}), "(return_values={'output': None}, log=llm_output)\n", (1271, 1319), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n'), ((1653, 1728), 'langchain.schema.AgentFinish', 'AgentFinish', ([], {'return_values': "{'output': None, 'form': action}", 'log': 'llm_output'}), "(return_values={'output': None, 'form': action}, log=llm_output)\n", (1664, 1728), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n')]
from typing import List, Union from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler from pydantic import BaseModel, Extra, validator from mindsdb.integrations.handlers.rag_handler.settings import ( DEFAULT_EMBEDDINGS_MODEL, RAGBaseParameters, ) EVAL_COLUMN_NAMES = ( "question", "answers", "context", ) SUPPORTED_EVALUATION_TYPES = ("retrieval", "e2e") GENERATION_METRICS = ("rouge", "meteor", "cosine_similarity", "accuracy") RETRIEVAL_METRICS = ("cosine_similarity", "accuracy") # todo make a separate class for evaluation parameters class WriterLLMParameters(BaseModel): """Model parameters for the Writer LLM API interface""" writer_api_key: str writer_org_id: str = None base_url: str = None model_id: str = "palmyra-x" callbacks: List[StreamingStdOutCallbackHandler] = [StreamingStdOutCallbackHandler()] max_tokens: int = 1024 temperature: float = 0.0 top_p: float = 1 stop: List[str] = [] best_of: int = 5 verbose: bool = False class Config: extra = Extra.forbid arbitrary_types_allowed = True class WriterHandlerParameters(RAGBaseParameters): """Model parameters for create model""" llm_params: WriterLLMParameters generation_evaluation_metrics: List[str] = list(GENERATION_METRICS) retrieval_evaluation_metrics: List[str] = list(RETRIEVAL_METRICS) evaluation_type: str = "e2e" n_rows_evaluation: int = None # if None, evaluate on all rows retriever_match_threshold: float = 0.7 generator_match_threshold: float = 0.8 evaluate_dataset: Union[List[dict], str] = "squad_v2_val_100_sample" class Config: extra = Extra.forbid arbitrary_types_allowed = True use_enum_values = True @validator("generation_evaluation_metrics", allow_reuse=True) def generation_evaluation_metrics_must_be_supported(cls, v): for metric in v: if metric not in GENERATION_METRICS: raise ValueError( f"generation_evaluation_metrics must be one of {', '.join(str(v) for v in GENERATION_METRICS)}, got {metric}" ) return v @validator("retrieval_evaluation_metrics", allow_reuse=True) def 
retrieval_evaluation_metrics_must_be_supported(cls, v): for metric in v: if metric not in GENERATION_METRICS: raise ValueError( f"retrieval_evaluation_metrics must be one of {', '.join(str(v) for v in RETRIEVAL_METRICS)}, got {metric}" ) return v @validator("evaluation_type", allow_reuse=True) def evaluation_type_must_be_supported(cls, v): if v not in SUPPORTED_EVALUATION_TYPES: raise ValueError( f"evaluation_type must be one of `retrieval` or `e2e`, got {v}" ) return v
[ "langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler" ]
[((1785, 1845), 'pydantic.validator', 'validator', (['"""generation_evaluation_metrics"""'], {'allow_reuse': '(True)'}), "('generation_evaluation_metrics', allow_reuse=True)\n", (1794, 1845), False, 'from pydantic import BaseModel, Extra, validator\n'), ((2190, 2249), 'pydantic.validator', 'validator', (['"""retrieval_evaluation_metrics"""'], {'allow_reuse': '(True)'}), "('retrieval_evaluation_metrics', allow_reuse=True)\n", (2199, 2249), False, 'from pydantic import BaseModel, Extra, validator\n'), ((2591, 2637), 'pydantic.validator', 'validator', (['"""evaluation_type"""'], {'allow_reuse': '(True)'}), "('evaluation_type', allow_reuse=True)\n", (2600, 2637), False, 'from pydantic import BaseModel, Extra, validator\n'), ((857, 889), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (887, 889), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n')]
from typing import List, Optional, Mapping, Any from functools import partial from langchain.llms.base import LLM from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler from transformers import AutoModel, AutoTokenizer from peft import get_peft_model, LoraConfig, TaskType import os import torch class ChatGLM3(LLM): model_path: str max_length: int = 8192 temperature: float = 0.1 top_p: float = 0.7 history: List = [] streaming: bool = True model: object = None tokenizer: object = None """ def __init__(self, model_path: str, max_length: int = 8192, temperature: float = 0.1, top_p: float = 0.7, history: List = None, streaming: bool = True): self.model_path = model_path self.max_length = max_length self.temperature = temperature self.top_p = top_p self.history = [] if history is None else history self.streaming = streaming self.model = None self.tokenizer = None """ @property def _llm_type(self) -> str: return "chatglm3-6B" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, add_history: bool = False ) -> str: if self.model is None or self.tokenizer is None: raise RuntimeError("Must call `load_model()` to load model and tokenizer!") if self.streaming: text_callback = partial(StreamingStdOutCallbackHandler().on_llm_new_token, verbose=True) resp = self.generate_resp(prompt, text_callback, add_history=add_history) else: resp = self.generate_resp(self, prompt, add_history=add_history) return resp def generate_resp(self, prompt, text_callback=None, add_history=True): resp = "" index = 0 if text_callback: for i, (resp, _) in enumerate(self.model.stream_chat( self.tokenizer, prompt, self.history, max_length=self.max_length, top_p=self.top_p, temperature=self.temperature )): if add_history: if i == 0: self.history += [[prompt, resp]] else: self.history[-1] = [prompt, resp] text_callback(resp[index:]) index = len(resp) 
else: resp, _ = self.model.chat( self.tokenizer, prompt, self.history, max_length=self.max_length, top_p=self.top_p, temperature=self.temperature ) if add_history: self.history += [[prompt, resp]] return resp def load_model(self): if self.model is not None or self.tokenizer is not None: return self.tokenizer = AutoTokenizer.from_pretrained(self.model_path, trust_remote_code=True) self.model = AutoModel.from_pretrained(self.model_path, trust_remote_code=True).half().cuda().eval() def load_model_from_checkpoint(self, checkpoint=None): if self.model is not None or self.tokenizer is not None: return self.tokenizer = AutoTokenizer.from_pretrained(self.model_path, trust_remote_code=True) self.model = AutoModel.from_pretrained(self.model_path, trust_remote_code=True).half() peft_config = LoraConfig( task_type=TaskType.CAUSAL_LM, inference_mode=False, r=8, target_modules=['query_key_value'], lora_alpha=32, lora_dropout=0.1, ) self.model = get_peft_model(self.model, peft_config).to("cuda") if checkpoint=="text_classification": model_dir = "./output/checkpoint-3000/" peft_path = "{}/chatglm-lora.pt".format(model_dir) if os.path.exists(peft_path): self.model.load_state_dict(torch.load(peft_path), strict=False)
[ "langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler" ]
[((3052, 3122), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['self.model_path'], {'trust_remote_code': '(True)'}), '(self.model_path, trust_remote_code=True)\n', (3081, 3122), False, 'from transformers import AutoModel, AutoTokenizer\n'), ((3401, 3471), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['self.model_path'], {'trust_remote_code': '(True)'}), '(self.model_path, trust_remote_code=True)\n', (3430, 3471), False, 'from transformers import AutoModel, AutoTokenizer\n'), ((3589, 3729), 'peft.LoraConfig', 'LoraConfig', ([], {'task_type': 'TaskType.CAUSAL_LM', 'inference_mode': '(False)', 'r': '(8)', 'target_modules': "['query_key_value']", 'lora_alpha': '(32)', 'lora_dropout': '(0.1)'}), "(task_type=TaskType.CAUSAL_LM, inference_mode=False, r=8,\n target_modules=['query_key_value'], lora_alpha=32, lora_dropout=0.1)\n", (3599, 3729), False, 'from peft import get_peft_model, LoraConfig, TaskType\n'), ((4081, 4106), 'os.path.exists', 'os.path.exists', (['peft_path'], {}), '(peft_path)\n', (4095, 4106), False, 'import os\n'), ((3493, 3559), 'transformers.AutoModel.from_pretrained', 'AutoModel.from_pretrained', (['self.model_path'], {'trust_remote_code': '(True)'}), '(self.model_path, trust_remote_code=True)\n', (3518, 3559), False, 'from transformers import AutoModel, AutoTokenizer\n'), ((3854, 3893), 'peft.get_peft_model', 'get_peft_model', (['self.model', 'peft_config'], {}), '(self.model, peft_config)\n', (3868, 3893), False, 'from peft import get_peft_model, LoraConfig, TaskType\n'), ((1564, 1596), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (1594, 1596), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n'), ((4152, 4173), 'torch.load', 'torch.load', (['peft_path'], {}), '(peft_path)\n', (4162, 4173), False, 'import torch\n'), ((3144, 3210), 
'transformers.AutoModel.from_pretrained', 'AutoModel.from_pretrained', (['self.model_path'], {'trust_remote_code': '(True)'}), '(self.model_path, trust_remote_code=True)\n', (3169, 3210), False, 'from transformers import AutoModel, AutoTokenizer\n')]
# Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. from datasets import load_dataset import json import unicodedata def remove_control_characters(s): return "".join(ch for ch in s if unicodedata.category(ch)[0]!="C") from langchain.text_splitter import RecursiveCharacterTextSplitter text_splitter = RecursiveCharacterTextSplitter( chunk_size = 1536, # chars, not llm tokens chunk_overlap = 0, length_function = len, is_separator_regex = False, ) documents = load_dataset('Shitao/MLDR', "corpus-en", split='corpus') feed_file = "/tmp/vespa_feed_file_en.json" with open(feed_file, "w") as f: for doc in documents: id = doc["docid"] text = doc['text'] chunks = text_splitter.create_documents([text]) text_chunks = [chunk.page_content for chunk in chunks] text_chunks = [remove_control_characters(chunk) for chunk in text_chunks] vespa_feed_doc = { "put": "id:%s:doc::%s" % ("en", id), "fields": { "text": text_chunks } } f.write(json.dumps(vespa_feed_doc)) f.write("\n")
[ "langchain.text_splitter.RecursiveCharacterTextSplitter" ]
[((362, 477), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1536)', 'chunk_overlap': '(0)', 'length_function': 'len', 'is_separator_regex': '(False)'}), '(chunk_size=1536, chunk_overlap=0,\n length_function=len, is_separator_regex=False)\n', (392, 477), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((539, 595), 'datasets.load_dataset', 'load_dataset', (['"""Shitao/MLDR"""', '"""corpus-en"""'], {'split': '"""corpus"""'}), "('Shitao/MLDR', 'corpus-en', split='corpus')\n", (551, 595), False, 'from datasets import load_dataset\n'), ((1148, 1174), 'json.dumps', 'json.dumps', (['vespa_feed_doc'], {}), '(vespa_feed_doc)\n', (1158, 1174), False, 'import json\n'), ((244, 268), 'unicodedata.category', 'unicodedata.category', (['ch'], {}), '(ch)\n', (264, 268), False, 'import unicodedata\n')]
from time import sleep import copy import redis import json import pickle import traceback from flask import Response, request, stream_with_context from typing import Dict, Union import os from langchain.schema import HumanMessage, SystemMessage from backend.api.language_model import get_llm from backend.main import app, message_id_register, message_pool, logger from backend.utils.streaming import single_round_chat_with_agent_streaming from backend.schemas import OVERLOAD, NEED_CONTINUE_MODEL from backend.schemas import DEFAULT_USER_ID from real_agents.adapters.llm import BaseLanguageModel from real_agents.adapters.agent_helpers import AgentExecutor, Tool from real_agents.adapters.callbacks.agent_streaming import \ AgentStreamingStdOutCallbackHandler from real_agents.adapters.models import ChatOpenAI from real_agents.adapters.memory import ConversationReActBufferMemory from real_agents.adapters.data_model import DataModel, JsonDataModel from real_agents.adapters.interactive_executor import initialize_webot_agent from real_agents.web_agent import WebBrowsingExecutor, WebotExecutor r = redis.Redis(host=os.getenv("REDIS_SERVER"), port=6379, db=0) # adjust host/port/db as needed # here webot and webot_status are stored in redis since the two global variable can not be modified and accessed normally in multiprocess # fixme:now webot is stored without message_id or chat_id info, so it can only be used for one chat at a time # fixme:now webot_status is stored with chat_id info, if the status is not reset after a message ended abnormally e.g. 
the message is interrupted, it will be reused wrongly for the next chat def get_webot_from_redis(user_id: str, chat_id: str, ) -> WebBrowsingExecutor: data = r.get(f'webot_{user_id}_{chat_id}') if data is not None: webot = pickle.loads(data) else: # initialize a webot with None instrucition if webot does not exist webot = WebBrowsingExecutor(None) save_webot_to_redis(user_id, chat_id, webot) return webot def save_webot_to_redis(user_id: str, chat_id: str, webot: WebBrowsingExecutor, ): r.set(f'webot_{user_id}_{chat_id}', pickle.dumps(webot)) def get_webot_status_from_redis(user_id: str, chat_id: str): webot_status_json = r.get(f'webot_status_{user_id}_{chat_id}') if webot_status_json is not None: webot_status = json.loads(webot_status_json) return webot_status else: return {} def save_webot_status_to_redis(user_id: str, chat_id: str, webot_status: Dict): r.set(f'webot_status_{user_id}_{chat_id}', json.dumps(webot_status)) def reset_webot(user_id: str, chat_id: str): webot = WebBrowsingExecutor(None) save_webot_to_redis(user_id, chat_id, webot) def reset_webot_status(user_id: str, chat_id: str): webot_status = {"webot_status": "idle", "url": None} save_webot_status_to_redis(user_id, chat_id, webot_status) # this function has been deprecated def get_plan(instruction: str, start_url: str, chat_llm: ChatOpenAI): # fixme: Move this into a separate chain or executors to decompose the LLMs system_message = f""" You are a planner to assist another browser automation assistant. Here is the instruction for the other assistant: ``` You MUST take one of the following actions. NEVER EVER EVER make up actions that do not exist: 1. click(element): Clicks on an element 2. setValue(element, value: string): Focuses on and sets the value of an input element 3. finish(): Indicates the task is finished 4. fail(): Indicates that you are unable to complete the task You will be be given a task to perform and the current state of the DOM. 
You will also be given previous actions that you have taken. You may retry a failed action up to one time. This is an example of an action: <Thought>I should click the add to cart button</Thought> <Action>click(223)</Action> You MUST always include the <Thought> and <Action> open/close tags or else your response will be marked as invalid. Rules you MUST follow: 1. You must only take one step at a time. You cannot take multiple actions in a single response. 2. You should not consider the action to present the result to the user. You only need to do available actions. If info in current page is enough for the user to solve the problem, you should finish. ``` Now your responsibility is to give a step-by-step plan according to user's instruction. This plan will be given to the assistant as a reference when it is performing tasks. """.strip() human_message = f""" The user requests the following task: {instruction} Now you are at {start_url} Provide a plan to do this (you can use pseudo description as below to describe the item). Here is an example case: request: Go to google calendar to schedule a meeting current url: "https://google.com" example plan: 1. setValue(searchBar, "google calendar") 2. click(search) 3. click(the item with title of google calendar) 4.1 if user has loginned do nothing 4.2 if user hasn't loginned do login 5. click(create event button) 6. setValue(event title input bar, "meeting") 7. click(save event button) 8. finish() """.strip() messages = [SystemMessage(content=system_message), HumanMessage(content=human_message)] response = chat_llm(messages).content return response def create_webot_interaction_executor( llm: BaseLanguageModel, llm_name: str, user_id: str, chat_id: str ) -> AgentExecutor: """Creates an agent executor for interaction. Args: llm: A llm model. llm_name: A string llm name. user_id: A string of user id. chat_id: A string chat id. Returns: An agent executor. 
""" # Initialize memory memory = ConversationReActBufferMemory(memory_key="chat_history", return_messages=True, max_token_limit=10000) class RunWebot: def __init__(self, webot: WebotExecutor, llm: BaseLanguageModel, user_id: str, chat_id: str): self.llm = llm self.webot = webot self.user_id = user_id self.chat_id = chat_id def run(self, term: str) -> Union[str, Dict, DataModel]: try: user_id = self.user_id chat_id = self.chat_id reset_webot(user_id=user_id, chat_id=chat_id) reset_webot_status(user_id=user_id, chat_id=chat_id) raw_observation = self.webot.run(user_intent=term, llm=self.llm) instruction, start_url = raw_observation["instruction"], \ raw_observation["start_url"] webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id) webot.instruction = instruction # webot.plan = get_plan(instruction, start_url) webot.plan = "" save_webot_to_redis(user_id=user_id, chat_id=chat_id, webot=webot) webot_status = { "webot_status": "running", "url": start_url } save_webot_status_to_redis(user_id=user_id, chat_id=chat_id, webot_status=webot_status) while True: webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id) if webot.finish or webot.interrupt or webot.error or webot.fail: break else: sleep(0.5) save_webot_status_to_redis(user_id=user_id, chat_id=chat_id, webot_status={"webot_status": "idle", "url": None}) webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id) webot.instruction = None save_webot_to_redis(user_id=user_id, chat_id=chat_id, webot=webot) if webot.finish: webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id) action_history = webot.action_history last_page = webot.pages_viewed[-1] observation = JsonDataModel.from_raw_data( { "success": True, "result": json.dumps({"action_history": action_history, "last_page": last_page}, indent=4), "intermediate_steps": json.dumps( {"instruction": instruction, "start_url": start_url}, indent=4) } ) return observation if webot.fail: observation = JsonDataModel.from_raw_data( { "success": True, 
"result": "The webot failed to execute the instruction.", "intermediate_steps": json.dumps( {"instruction": instruction, "start_url": start_url}, indent=4) } ) return observation if webot.interrupt: observation = JsonDataModel.from_raw_data( { "success": False, "result": "The web browsing is interrupted by user.", "intermediate_steps": json.dumps( {"instruction": instruction, "start_url": start_url}, indent=4) } ) return observation if webot.error: observation = JsonDataModel.from_raw_data( { "success": False, "result": "Error occurs during web browsing.", "intermediate_steps": json.dumps( {"instruction": instruction, "start_url": start_url}, indent=4) } ) return observation except Exception as e: print(traceback.format_exc()) observation = JsonDataModel.from_raw_data( { "success": False, "result": f"Failed in web browsing with the input: {term}, please try again later.", "intermediate_steps": json.dumps({"error": str(e)}) } ) return observation webot = WebotExecutor.from_webot() llm = copy.deepcopy(llm) run_webot = RunWebot(webot, llm, chat_id=chat_id, user_id=user_id) tools = [Tool(name=webot.name, func=run_webot.run, description=webot.description)] continue_model = llm_name if llm_name in NEED_CONTINUE_MODEL else None interaction_executor = initialize_webot_agent( tools, llm, continue_model, memory=memory, verbose=True ) return interaction_executor @app.route("/api/chat_xlang_webot", methods=["POST"]) def chat_xlang_webot() -> Dict: """Returns the chat response of web agent.""" try: # Get request parameters request_json = request.get_json() user_id = request_json.pop("user_id", DEFAULT_USER_ID) chat_id = request_json["chat_id"] user_intent = request_json["user_intent"] parent_message_id = request_json["parent_message_id"] llm_name = request_json["llm_name"] temperature = request_json.get("temperature", 0.4) stop_words = ["[RESPONSE_BEGIN]", "TOOL RESPONSE"] kwargs = { "temperature": temperature, "stop": stop_words, } # Get language model llm = get_llm(llm_name, 
**kwargs) logger.bind(user_id=user_id, chat_id=chat_id, api="/chat", msg_head="Request json").debug(request_json) human_message_id = message_id_register.add_variable(user_intent) ai_message_id = message_id_register.add_variable("") stream_handler = AgentStreamingStdOutCallbackHandler() # Build executor and run chat # reset webot and status reset_webot(user_id=user_id, chat_id=chat_id) reset_webot_status(user_id=user_id, chat_id=chat_id) interaction_executor = create_webot_interaction_executor( llm=llm, llm_name=llm_name, chat_id=chat_id, user_id=user_id ) activated_message_list = message_pool.get_activated_message_list(user_id, chat_id, list(), parent_message_id) message_pool.load_agent_memory_from_list(interaction_executor.memory, activated_message_list) return stream_with_context( Response( single_round_chat_with_agent_streaming( interaction_executor=interaction_executor, user_intent=user_intent, human_message_id=human_message_id, ai_message_id=ai_message_id, user_id=user_id, chat_id=chat_id, message_list=activated_message_list, parent_message_id=parent_message_id, stream_handler=stream_handler, llm_name=llm_name, app_type="webot", ), content_type="application/json", ) ) except Exception as e: import traceback traceback.print_exc() return Response(response=None, status=f"{OVERLOAD} backend is currently overloaded")
[ "langchain.schema.HumanMessage", "langchain.schema.SystemMessage" ]
[((11305, 11357), 'backend.main.app.route', 'app.route', (['"""/api/chat_xlang_webot"""'], {'methods': "['POST']"}), "('/api/chat_xlang_webot', methods=['POST'])\n", (11314, 11357), False, 'from backend.main import app, message_id_register, message_pool, logger\n'), ((2664, 2689), 'real_agents.web_agent.WebBrowsingExecutor', 'WebBrowsingExecutor', (['None'], {}), '(None)\n', (2683, 2689), False, 'from real_agents.web_agent import WebBrowsingExecutor, WebotExecutor\n'), ((5769, 5875), 'real_agents.adapters.memory.ConversationReActBufferMemory', 'ConversationReActBufferMemory', ([], {'memory_key': '"""chat_history"""', 'return_messages': '(True)', 'max_token_limit': '(10000)'}), "(memory_key='chat_history', return_messages=\n True, max_token_limit=10000)\n", (5798, 5875), False, 'from real_agents.adapters.memory import ConversationReActBufferMemory\n'), ((10859, 10885), 'real_agents.web_agent.WebotExecutor.from_webot', 'WebotExecutor.from_webot', ([], {}), '()\n', (10883, 10885), False, 'from real_agents.web_agent import WebBrowsingExecutor, WebotExecutor\n'), ((10896, 10914), 'copy.deepcopy', 'copy.deepcopy', (['llm'], {}), '(llm)\n', (10909, 10914), False, 'import copy\n'), ((11176, 11255), 'real_agents.adapters.interactive_executor.initialize_webot_agent', 'initialize_webot_agent', (['tools', 'llm', 'continue_model'], {'memory': 'memory', 'verbose': '(True)'}), '(tools, llm, continue_model, memory=memory, verbose=True)\n', (11198, 11255), False, 'from real_agents.adapters.interactive_executor import initialize_webot_agent\n'), ((1125, 1150), 'os.getenv', 'os.getenv', (['"""REDIS_SERVER"""'], {}), "('REDIS_SERVER')\n", (1134, 1150), False, 'import os\n'), ((1810, 1828), 'pickle.loads', 'pickle.loads', (['data'], {}), '(data)\n', (1822, 1828), False, 'import pickle\n'), ((1931, 1956), 'real_agents.web_agent.WebBrowsingExecutor', 'WebBrowsingExecutor', (['None'], {}), '(None)\n', (1950, 1956), False, 'from real_agents.web_agent import WebBrowsingExecutor, 
WebotExecutor\n'), ((2152, 2171), 'pickle.dumps', 'pickle.dumps', (['webot'], {}), '(webot)\n', (2164, 2171), False, 'import pickle\n'), ((2364, 2393), 'json.loads', 'json.loads', (['webot_status_json'], {}), '(webot_status_json)\n', (2374, 2393), False, 'import json\n'), ((2579, 2603), 'json.dumps', 'json.dumps', (['webot_status'], {}), '(webot_status)\n', (2589, 2603), False, 'import json\n'), ((5172, 5209), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'system_message'}), '(content=system_message)\n', (5185, 5209), False, 'from langchain.schema import HumanMessage, SystemMessage\n'), ((5227, 5262), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'human_message'}), '(content=human_message)\n', (5239, 5262), False, 'from langchain.schema import HumanMessage, SystemMessage\n'), ((10999, 11071), 'real_agents.adapters.agent_helpers.Tool', 'Tool', ([], {'name': 'webot.name', 'func': 'run_webot.run', 'description': 'webot.description'}), '(name=webot.name, func=run_webot.run, description=webot.description)\n', (11003, 11071), False, 'from real_agents.adapters.agent_helpers import AgentExecutor, Tool\n'), ((11505, 11523), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (11521, 11523), False, 'from flask import Response, request, stream_with_context\n'), ((12048, 12075), 'backend.api.language_model.get_llm', 'get_llm', (['llm_name'], {}), '(llm_name, **kwargs)\n', (12055, 12075), False, 'from backend.api.language_model import get_llm\n'), ((12237, 12282), 'backend.main.message_id_register.add_variable', 'message_id_register.add_variable', (['user_intent'], {}), '(user_intent)\n', (12269, 12282), False, 'from backend.main import app, message_id_register, message_pool, logger\n'), ((12307, 12343), 'backend.main.message_id_register.add_variable', 'message_id_register.add_variable', (['""""""'], {}), "('')\n", (12339, 12343), False, 'from backend.main import app, message_id_register, message_pool, logger\n'), ((12370, 
12407), 'real_agents.adapters.callbacks.agent_streaming.AgentStreamingStdOutCallbackHandler', 'AgentStreamingStdOutCallbackHandler', ([], {}), '()\n', (12405, 12407), False, 'from real_agents.adapters.callbacks.agent_streaming import AgentStreamingStdOutCallbackHandler\n'), ((13127, 13224), 'backend.main.message_pool.load_agent_memory_from_list', 'message_pool.load_agent_memory_from_list', (['interaction_executor.memory', 'activated_message_list'], {}), '(interaction_executor.memory,\n activated_message_list)\n', (13167, 13224), False, 'from backend.main import app, message_id_register, message_pool, logger\n'), ((14066, 14087), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (14085, 14087), False, 'import traceback\n'), ((14103, 14180), 'flask.Response', 'Response', ([], {'response': 'None', 'status': 'f"""{OVERLOAD} backend is currently overloaded"""'}), "(response=None, status=f'{OVERLOAD} backend is currently overloaded')\n", (14111, 14180), False, 'from flask import Response, request, stream_with_context\n'), ((12085, 12173), 'backend.main.logger.bind', 'logger.bind', ([], {'user_id': 'user_id', 'chat_id': 'chat_id', 'api': '"""/chat"""', 'msg_head': '"""Request json"""'}), "(user_id=user_id, chat_id=chat_id, api='/chat', msg_head=\n 'Request json')\n", (12096, 12173), False, 'from backend.main import app, message_id_register, message_pool, logger\n'), ((13344, 13714), 'backend.utils.streaming.single_round_chat_with_agent_streaming', 'single_round_chat_with_agent_streaming', ([], {'interaction_executor': 'interaction_executor', 'user_intent': 'user_intent', 'human_message_id': 'human_message_id', 'ai_message_id': 'ai_message_id', 'user_id': 'user_id', 'chat_id': 'chat_id', 'message_list': 'activated_message_list', 'parent_message_id': 'parent_message_id', 'stream_handler': 'stream_handler', 'llm_name': 'llm_name', 'app_type': '"""webot"""'}), "(interaction_executor=\n interaction_executor, user_intent=user_intent, human_message_id=\n 
human_message_id, ai_message_id=ai_message_id, user_id=user_id, chat_id\n =chat_id, message_list=activated_message_list, parent_message_id=\n parent_message_id, stream_handler=stream_handler, llm_name=llm_name,\n app_type='webot')\n", (13382, 13714), False, 'from backend.utils.streaming import single_round_chat_with_agent_streaming\n'), ((7547, 7557), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (7552, 7557), False, 'from time import sleep\n'), ((10439, 10461), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (10459, 10461), False, 'import traceback\n'), ((8392, 8477), 'json.dumps', 'json.dumps', (["{'action_history': action_history, 'last_page': last_page}"], {'indent': '(4)'}), "({'action_history': action_history, 'last_page': last_page}, indent=4\n )\n", (8402, 8477), False, 'import json\n'), ((8574, 8648), 'json.dumps', 'json.dumps', (["{'instruction': instruction, 'start_url': start_url}"], {'indent': '(4)'}), "({'instruction': instruction, 'start_url': start_url}, indent=4)\n", (8584, 8648), False, 'import json\n'), ((9103, 9177), 'json.dumps', 'json.dumps', (["{'instruction': instruction, 'start_url': start_url}"], {'indent': '(4)'}), "({'instruction': instruction, 'start_url': start_url}, indent=4)\n", (9113, 9177), False, 'import json\n'), ((9634, 9708), 'json.dumps', 'json.dumps', (["{'instruction': instruction, 'start_url': start_url}"], {'indent': '(4)'}), "({'instruction': instruction, 'start_url': start_url}, indent=4)\n", (9644, 9708), False, 'import json\n'), ((10154, 10228), 'json.dumps', 'json.dumps', (["{'instruction': instruction, 'start_url': start_url}"], {'indent': '(4)'}), "({'instruction': instruction, 'start_url': start_url}, indent=4)\n", (10164, 10228), False, 'import json\n')]
import os import json from langchain.schema import messages_from_dict, messages_to_dict from langchain.memory import ( ConversationBufferMemory, ChatMessageHistory, ) class YeagerAIContext: """Context for the @yeager.ai agent.""" def __init__(self, username: str, session_id: str, session_path: str): self.username = username self.session_id = session_id self.session_path = session_path self.session_message_history = ChatMessageHistory() self.chat_buffer_memory = ConversationBufferMemory( memory_key="chat_history", input_key="input" ) def load_session_message_history(self): try: with open(os.path.join(self.session_path, "session_history.txt"), "r") as f: dicts = json.loads(f.read()) self.session_message_history.messages = messages_from_dict(dicts) except FileNotFoundError: os.makedirs(self.session_path, exist_ok=True) with open(os.path.join(self.session_path, "session_history.txt"), "w") as f: f.close() def save_session_message_history(self): dicts = messages_to_dict(self.session_message_history.messages) with open(os.path.join(self.session_path, "session_history.txt"), "w") as f: f.write(json.dumps(dicts)) f.close() def create_shadow_clones(self): self.load_session_message_history() self.chat_buffer_memory.chat_memory = self.session_message_history def dispell_shadow_clones(self): self.session_message_history = self.chat_buffer_memory.chat_memory self.save_session_message_history()
[ "langchain.memory.ConversationBufferMemory", "langchain.memory.ChatMessageHistory", "langchain.schema.messages_to_dict", "langchain.schema.messages_from_dict" ]
[((472, 492), 'langchain.memory.ChatMessageHistory', 'ChatMessageHistory', ([], {}), '()\n', (490, 492), False, 'from langchain.memory import ConversationBufferMemory, ChatMessageHistory\n'), ((527, 597), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'input_key': '"""input"""'}), "(memory_key='chat_history', input_key='input')\n", (551, 597), False, 'from langchain.memory import ConversationBufferMemory, ChatMessageHistory\n'), ((1162, 1217), 'langchain.schema.messages_to_dict', 'messages_to_dict', (['self.session_message_history.messages'], {}), '(self.session_message_history.messages)\n', (1178, 1217), False, 'from langchain.schema import messages_from_dict, messages_to_dict\n'), ((868, 893), 'langchain.schema.messages_from_dict', 'messages_from_dict', (['dicts'], {}), '(dicts)\n', (886, 893), False, 'from langchain.schema import messages_from_dict, messages_to_dict\n'), ((940, 985), 'os.makedirs', 'os.makedirs', (['self.session_path'], {'exist_ok': '(True)'}), '(self.session_path, exist_ok=True)\n', (951, 985), False, 'import os\n'), ((1236, 1290), 'os.path.join', 'os.path.join', (['self.session_path', '"""session_history.txt"""'], {}), "(self.session_path, 'session_history.txt')\n", (1248, 1290), False, 'import os\n'), ((1323, 1340), 'json.dumps', 'json.dumps', (['dicts'], {}), '(dicts)\n', (1333, 1340), False, 'import json\n'), ((700, 754), 'os.path.join', 'os.path.join', (['self.session_path', '"""session_history.txt"""'], {}), "(self.session_path, 'session_history.txt')\n", (712, 754), False, 'import os\n'), ((1008, 1062), 'os.path.join', 'os.path.join', (['self.session_path', '"""session_history.txt"""'], {}), "(self.session_path, 'session_history.txt')\n", (1020, 1062), False, 'import os\n')]
import argparse import os import subprocess import time import gradio as gr from huggingface_hub import snapshot_download from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain_community.document_loaders import ( Docx2txtLoader, PyPDFLoader, TextLoader, YoutubeLoader, ) from langchain_community.embeddings import HuggingFaceEmbeddings from langchain_community.vectorstores import Chroma from openai import OpenAI from chat_with_mlx import __version__ from chat_with_mlx.models.utils import model_info from chat_with_mlx.rag.utils import get_prompt os.environ["TOKENIZERS_PARALLELISM"] = "False" SUPPORTED_LANG = [ "default", "English", "Spanish", "Chinese", "Vietnamese", "Japanese", "Korean", "Indian", "Turkish", "German", "French", "Italian", ] openai_api_base = "http://127.0.0.1:8080/v1" model_dicts, yml_path, cfg_list, mlx_config = model_info() model_list = list(cfg_list.keys()) client = OpenAI(api_key="EMPTY", base_url=openai_api_base) text_splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=50) emb = HuggingFaceEmbeddings( model_name="nomic-ai/nomic-embed-text-v1.5", model_kwargs={"trust_remote_code": True}, ) vectorstore = None def load_model(model_name, lang): global process, rag_prompt, rag_his_prompt, sys_prompt, default_lang default_lang = "default" prompts, sys_prompt = get_prompt(f"{yml_path[cfg_list[model_name]]}", lang) rag_prompt, rag_his_prompt = prompts[0], prompts[1] model_name_list = cfg_list[model_name].split("/") directory_path = os.path.dirname(os.path.abspath(__file__)) local_model_dir = os.path.join( directory_path, "models", "download", model_name_list[1] ) if not os.path.exists(local_model_dir): snapshot_download(repo_id=mlx_config[model_name], local_dir=local_model_dir) command = ["python3", "-m", "mlx_lm.server", "--model", local_model_dir] try: process = subprocess.Popen( command, stdin=subprocess.PIPE, stderr=subprocess.PIPE, text=True ) process.stdin.write("y\n") process.stdin.flush() return 
{model_status: "Model Loaded"} except Exception as e: return {model_status: f"Exception occurred: {str(e)}"} def kill_process(): global process process.terminate() time.sleep(2) if process.poll() is None: # Check if the process has indeed terminated process.kill() # Force kill if still running print("Model Killed") return {model_status: "Model Unloaded"} def check_file_type(file_path): # Check for document file extensions if ( file_path.endswith(".pdf") or file_path.endswith(".txt") or file_path.endswith(".doc") or file_path.endswith(".docx") ): return True # Check for YouTube link formats elif ( file_path.startswith("https://www.youtube.com/") or file_path.startswith("https://youtube.com/") or file_path.startswith("https://youtu.be/") ): return True else: return False def upload(files): supported = check_file_type(files) if supported: return {url: files, index_status: "Not Done"} else: return {url: "File type not supported", index_status: "Not Done"} def indexing(mode, url): global vectorstore try: if mode == "Files (docx, pdf, txt)": if url.endswith(".pdf"): loader = PyPDFLoader(url) elif url.endswith(".docx"): loader = Docx2txtLoader(url) elif url.endswith(".txt"): loader = TextLoader(url) splits = loader.load_and_split(text_splitter) elif mode == "YouTube (url)": loader = YoutubeLoader.from_youtube_url( url, add_video_info=False, language=["en", "vi"] ) splits = loader.load_and_split(text_splitter) vectorstore = Chroma.from_documents(documents=splits, embedding=emb) return {index_status: "Indexing Done"} except Exception as e: # Print the error message or return it as part of the response print(f"Error: {e}") # This will print the error to the console or log return {"index_status": "Indexing Error", "error_message": str(e)} def kill_index(): global vectorstore vectorstore = None return {index_status: "Indexing Undone"} def build_rag_context(docs): context = "" for doc in docs: context += doc.page_content + "\n" return context def chatbot(query, history, temp, 
max_tokens, freq_penalty, k_docs): global chat_history, sys_prompt if "vectorstore" in globals() and vectorstore is not None: if len(history) == 0: chat_history = [] if sys_prompt is not None: chat_history.append({"role": "system", "content": sys_prompt}) docs = vectorstore.similarity_search(query, k=k_docs) else: history_str = "" for i, message in enumerate(history): history_str += f"User: {message[0]}\n" history_str += f"AI: {message[1]}\n" if sys_prompt is not None: chat_history.append({"role": "system", "content": sys_prompt}) chat_history.append({"role": "user", "content": history_str}) docs = vectorstore.similarity_search(history_str) context = build_rag_context(docs) if len(history) == 0: prompt = rag_prompt.format(context=context, question=query) else: prompt = rag_his_prompt.format( chat_history=history_str, context=context, question=query ) messages = [{"role": "user", "content": prompt}] else: if len(history) == 0: chat_history = [] if sys_prompt is not None: chat_history.append({"role": "system", "content": sys_prompt}) else: chat_history = [] if sys_prompt is not None: chat_history.append({"role": "system", "content": sys_prompt}) for i, message in enumerate(history): chat_history.append({"role": "user", "content": message[0]}) chat_history.append({"role": "assistant", "content": message[1]}) chat_history.append({"role": "user", "content": query}) messages = chat_history # Uncomment for debugging # print(messages) response = client.chat.completions.create( model="gpt", messages=messages, temperature=temp, frequency_penalty=freq_penalty, max_tokens=max_tokens, stream=True, ) stop = ["<|im_end|>", "<|endoftext|>"] partial_message = "" for chunk in response: if len(chunk.choices) != 0: if chunk.choices[0].delta.content not in stop: partial_message = partial_message + chunk.choices[0].delta.content else: partial_message = partial_message + "" yield partial_message with gr.Blocks(fill_height=True, theme=gr.themes.Soft()) as demo: model_name = gr.Dropdown( 
label="Model", info="Select your model", choices=sorted(model_list), interactive=True, render=False, ) temp_slider = gr.State(0.2) max_gen_token = gr.State(512) freq_penalty = gr.State(1.05) retrieve_docs = gr.State(3) language = gr.State("default") gr.ChatInterface( chatbot=gr.Chatbot(height=600, render=False), fn=chatbot, # Function to call on user input title="Chat with MLX🍎", # Title of the web page description="Chat with your data using Apple MLX Backend", # Description additional_inputs=[temp_slider, max_gen_token, freq_penalty, retrieve_docs], ) with gr.Accordion("Advanced Setting", open=False): with gr.Row(): with gr.Column(scale=2): temp_slider = gr.Slider( label="Temperature", value=0.2, minimum=0.0, maximum=1.0, step=0.05, interactive=True, ) max_gen_token = gr.Slider( label="Max Tokens", value=512, minimum=512, maximum=4096, step=256, interactive=True, ) with gr.Column(scale=2): freq_penalty = gr.Slider( label="Frequency Penalty", value=1.05, minimum=-2, maximum=2, step=0.05, interactive=True, ) retrieve_docs = gr.Slider( label="No. 
Retrieval Docs", value=3, minimum=1, maximum=10, step=1, interactive=True, ) with gr.Row(): with gr.Column(scale=2): model_name.render() language = gr.Dropdown( label="Language", choices=sorted(SUPPORTED_LANG), value="default", interactive=True, ) btn1 = gr.Button("Load Model", variant="primary") btn3 = gr.Button("Unload Model", variant="stop") with gr.Column(scale=4): with gr.Row(): with gr.Column(scale=9): mode = gr.Dropdown( label="Dataset", info="Choose your dataset type", choices=["Files (docx, pdf, txt)", "YouTube (url)"], scale=5, ) url = gr.Textbox( label="URL", info="Enter your filepath (URL for Youtube)", interactive=True, ) upload_button = gr.UploadButton( label="Upload File", variant="primary" ) # data = gr.Textbox(visible=lambda mode: mode == 'YouTube') with gr.Column(scale=1): model_status = gr.Textbox("Model Not Loaded", label="Model Status") index_status = gr.Textbox("Not Index", label="Index Status") btn1.click( load_model, inputs=[model_name, language], outputs=[model_status], ) btn3.click(kill_process, outputs=[model_status]) upload_button.upload( upload, inputs=upload_button, outputs=[url, index_status] ) index_button = gr.Button("Start Indexing", variant="primary") index_button.click( indexing, inputs=[mode, url], outputs=[index_status] ) stop_index_button = gr.Button("Stop Indexing") stop_index_button.click(kill_index, outputs=[index_status]) def app(port, share): print(f"Starting MLX Chat on port {port}") print(f"Sharing: {share}") demo.launch(inbrowser=True, share=share, server_port=port) def main(): parser = argparse.ArgumentParser( description="Chat with MLX \n" "Native RAG on MacOS and Apple Silicon with MLX 🧑‍💻" ) parser.add_argument( "--version", action="version", version=f"Chat with MLX {__version__}" ) parser.add_argument( "--port", type=int, default=7860, help="Port number to run the app", ) parser.add_argument( "--share", default=False, help="Enable sharing the app", ) args = parser.parse_args() app(port=args.port, share=args.share)
[ "langchain_community.document_loaders.Docx2txtLoader", "langchain_community.document_loaders.PyPDFLoader", "langchain_community.document_loaders.TextLoader", "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain_community.vectorstores.Chroma.from_documents", "langchain_community.document_loaders.YoutubeLoader.from_youtube_url", "langchain_community.embeddings.HuggingFaceEmbeddings" ]
[((934, 946), 'chat_with_mlx.models.utils.model_info', 'model_info', ([], {}), '()\n', (944, 946), False, 'from chat_with_mlx.models.utils import model_info\n'), ((991, 1040), 'openai.OpenAI', 'OpenAI', ([], {'api_key': '"""EMPTY"""', 'base_url': 'openai_api_base'}), "(api_key='EMPTY', base_url=openai_api_base)\n", (997, 1040), False, 'from openai import OpenAI\n'), ((1057, 1121), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(512)', 'chunk_overlap': '(50)'}), '(chunk_size=512, chunk_overlap=50)\n', (1087, 1121), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1128, 1240), 'langchain_community.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""nomic-ai/nomic-embed-text-v1.5"""', 'model_kwargs': "{'trust_remote_code': True}"}), "(model_name='nomic-ai/nomic-embed-text-v1.5',\n model_kwargs={'trust_remote_code': True})\n", (1149, 1240), False, 'from langchain_community.embeddings import HuggingFaceEmbeddings\n'), ((1431, 1484), 'chat_with_mlx.rag.utils.get_prompt', 'get_prompt', (['f"""{yml_path[cfg_list[model_name]]}"""', 'lang'], {}), "(f'{yml_path[cfg_list[model_name]]}', lang)\n", (1441, 1484), False, 'from chat_with_mlx.rag.utils import get_prompt\n'), ((1681, 1751), 'os.path.join', 'os.path.join', (['directory_path', '"""models"""', '"""download"""', 'model_name_list[1]'], {}), "(directory_path, 'models', 'download', model_name_list[1])\n", (1693, 1751), False, 'import os\n'), ((2378, 2391), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2388, 2391), False, 'import time\n'), ((7394, 7407), 'gradio.State', 'gr.State', (['(0.2)'], {}), '(0.2)\n', (7402, 7407), True, 'import gradio as gr\n'), ((7428, 7441), 'gradio.State', 'gr.State', (['(512)'], {}), '(512)\n', (7436, 7441), True, 'import gradio as gr\n'), ((7461, 7475), 'gradio.State', 'gr.State', (['(1.05)'], {}), '(1.05)\n', (7469, 7475), True, 'import gradio as gr\n'), 
((7496, 7507), 'gradio.State', 'gr.State', (['(3)'], {}), '(3)\n', (7504, 7507), True, 'import gradio as gr\n'), ((7523, 7542), 'gradio.State', 'gr.State', (['"""default"""'], {}), "('default')\n", (7531, 7542), True, 'import gradio as gr\n'), ((11507, 11620), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Chat with MLX \nNative RAG on MacOS and Apple Silicon with MLX 🧑\u200d💻"""'}), '(description=\n """Chat with MLX \nNative RAG on MacOS and Apple Silicon with MLX 🧑\u200d💻""")\n', (11530, 11620), False, 'import argparse\n'), ((1632, 1657), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1647, 1657), False, 'import os\n'), ((1778, 1809), 'os.path.exists', 'os.path.exists', (['local_model_dir'], {}), '(local_model_dir)\n', (1792, 1809), False, 'import os\n'), ((1819, 1895), 'huggingface_hub.snapshot_download', 'snapshot_download', ([], {'repo_id': 'mlx_config[model_name]', 'local_dir': 'local_model_dir'}), '(repo_id=mlx_config[model_name], local_dir=local_model_dir)\n', (1836, 1895), False, 'from huggingface_hub import snapshot_download\n'), ((2002, 2089), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'stdin': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'text': '(True)'}), '(command, stdin=subprocess.PIPE, stderr=subprocess.PIPE,\n text=True)\n', (2018, 2089), False, 'import subprocess\n'), ((4005, 4059), 'langchain_community.vectorstores.Chroma.from_documents', 'Chroma.from_documents', ([], {'documents': 'splits', 'embedding': 'emb'}), '(documents=splits, embedding=emb)\n', (4026, 4059), False, 'from langchain_community.vectorstores import Chroma\n'), ((7912, 7956), 'gradio.Accordion', 'gr.Accordion', (['"""Advanced Setting"""'], {'open': '(False)'}), "('Advanced Setting', open=False)\n", (7924, 7956), True, 'import gradio as gr\n'), ((9136, 9144), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (9142, 9144), True, 'import gradio as gr\n'), ((7172, 7188), 'gradio.themes.Soft', 'gr.themes.Soft', ([], 
{}), '()\n', (7186, 7188), True, 'import gradio as gr\n'), ((7581, 7617), 'gradio.Chatbot', 'gr.Chatbot', ([], {'height': '(600)', 'render': '(False)'}), '(height=600, render=False)\n', (7591, 7617), True, 'import gradio as gr\n'), ((7971, 7979), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (7977, 7979), True, 'import gradio as gr\n'), ((9159, 9177), 'gradio.Column', 'gr.Column', ([], {'scale': '(2)'}), '(scale=2)\n', (9168, 9177), True, 'import gradio as gr\n'), ((9429, 9471), 'gradio.Button', 'gr.Button', (['"""Load Model"""'], {'variant': '"""primary"""'}), "('Load Model', variant='primary')\n", (9438, 9471), True, 'import gradio as gr\n'), ((9491, 9532), 'gradio.Button', 'gr.Button', (['"""Unload Model"""'], {'variant': '"""stop"""'}), "('Unload Model', variant='stop')\n", (9500, 9532), True, 'import gradio as gr\n'), ((9546, 9564), 'gradio.Column', 'gr.Column', ([], {'scale': '(4)'}), '(scale=4)\n', (9555, 9564), True, 'import gradio as gr\n'), ((3514, 3530), 'langchain_community.document_loaders.PyPDFLoader', 'PyPDFLoader', (['url'], {}), '(url)\n', (3525, 3530), False, 'from langchain_community.document_loaders import Docx2txtLoader, PyPDFLoader, TextLoader, YoutubeLoader\n'), ((3813, 3898), 'langchain_community.document_loaders.YoutubeLoader.from_youtube_url', 'YoutubeLoader.from_youtube_url', (['url'], {'add_video_info': '(False)', 'language': "['en', 'vi']"}), "(url, add_video_info=False, language=['en', 'vi']\n )\n", (3843, 3898), False, 'from langchain_community.document_loaders import Docx2txtLoader, PyPDFLoader, TextLoader, YoutubeLoader\n'), ((7998, 8016), 'gradio.Column', 'gr.Column', ([], {'scale': '(2)'}), '(scale=2)\n', (8007, 8016), True, 'import gradio as gr\n'), ((8048, 8149), 'gradio.Slider', 'gr.Slider', ([], {'label': '"""Temperature"""', 'value': '(0.2)', 'minimum': '(0.0)', 'maximum': '(1.0)', 'step': '(0.05)', 'interactive': '(True)'}), "(label='Temperature', value=0.2, minimum=0.0, maximum=1.0, step=\n 0.05, interactive=True)\n", (8057, 
8149), True, 'import gradio as gr\n'), ((8316, 8416), 'gradio.Slider', 'gr.Slider', ([], {'label': '"""Max Tokens"""', 'value': '(512)', 'minimum': '(512)', 'maximum': '(4096)', 'step': '(256)', 'interactive': '(True)'}), "(label='Max Tokens', value=512, minimum=512, maximum=4096, step=\n 256, interactive=True)\n", (8325, 8416), True, 'import gradio as gr\n'), ((8568, 8586), 'gradio.Column', 'gr.Column', ([], {'scale': '(2)'}), '(scale=2)\n', (8577, 8586), True, 'import gradio as gr\n'), ((8619, 8723), 'gradio.Slider', 'gr.Slider', ([], {'label': '"""Frequency Penalty"""', 'value': '(1.05)', 'minimum': '(-2)', 'maximum': '(2)', 'step': '(0.05)', 'interactive': '(True)'}), "(label='Frequency Penalty', value=1.05, minimum=-2, maximum=2,\n step=0.05, interactive=True)\n", (8628, 8723), True, 'import gradio as gr\n'), ((8891, 8991), 'gradio.Slider', 'gr.Slider', ([], {'label': '"""No. Retrieval Docs"""', 'value': '(3)', 'minimum': '(1)', 'maximum': '(10)', 'step': '(1)', 'interactive': '(True)'}), "(label='No. 
Retrieval Docs', value=3, minimum=1, maximum=10, step=\n 1, interactive=True)\n", (8900, 8991), True, 'import gradio as gr\n'), ((9583, 9591), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (9589, 9591), True, 'import gradio as gr\n'), ((3596, 3615), 'langchain_community.document_loaders.Docx2txtLoader', 'Docx2txtLoader', (['url'], {}), '(url)\n', (3610, 3615), False, 'from langchain_community.document_loaders import Docx2txtLoader, PyPDFLoader, TextLoader, YoutubeLoader\n'), ((9614, 9632), 'gradio.Column', 'gr.Column', ([], {'scale': '(9)'}), '(scale=9)\n', (9623, 9632), True, 'import gradio as gr\n'), ((9661, 9789), 'gradio.Dropdown', 'gr.Dropdown', ([], {'label': '"""Dataset"""', 'info': '"""Choose your dataset type"""', 'choices': "['Files (docx, pdf, txt)', 'YouTube (url)']", 'scale': '(5)'}), "(label='Dataset', info='Choose your dataset type', choices=[\n 'Files (docx, pdf, txt)', 'YouTube (url)'], scale=5)\n", (9672, 9789), True, 'import gradio as gr\n'), ((9930, 10021), 'gradio.Textbox', 'gr.Textbox', ([], {'label': '"""URL"""', 'info': '"""Enter your filepath (URL for Youtube)"""', 'interactive': '(True)'}), "(label='URL', info='Enter your filepath (URL for Youtube)',\n interactive=True)\n", (9940, 10021), True, 'import gradio as gr\n'), ((10149, 10204), 'gradio.UploadButton', 'gr.UploadButton', ([], {'label': '"""Upload File"""', 'variant': '"""primary"""'}), "(label='Upload File', variant='primary')\n", (10164, 10204), True, 'import gradio as gr\n'), ((10349, 10367), 'gradio.Column', 'gr.Column', ([], {'scale': '(1)'}), '(scale=1)\n', (10358, 10367), True, 'import gradio as gr\n'), ((10404, 10456), 'gradio.Textbox', 'gr.Textbox', (['"""Model Not Loaded"""'], {'label': '"""Model Status"""'}), "('Model Not Loaded', label='Model Status')\n", (10414, 10456), True, 'import gradio as gr\n'), ((10492, 10537), 'gradio.Textbox', 'gr.Textbox', (['"""Not Index"""'], {'label': '"""Index Status"""'}), "('Not Index', label='Index Status')\n", (10502, 10537), True, 'import 
gradio as gr\n'), ((10982, 11028), 'gradio.Button', 'gr.Button', (['"""Start Indexing"""'], {'variant': '"""primary"""'}), "('Start Indexing', variant='primary')\n", (10991, 11028), True, 'import gradio as gr\n'), ((11208, 11234), 'gradio.Button', 'gr.Button', (['"""Stop Indexing"""'], {}), "('Stop Indexing')\n", (11217, 11234), True, 'import gradio as gr\n'), ((3680, 3695), 'langchain_community.document_loaders.TextLoader', 'TextLoader', (['url'], {}), '(url)\n', (3690, 3695), False, 'from langchain_community.document_loaders import Docx2txtLoader, PyPDFLoader, TextLoader, YoutubeLoader\n')]
import time import numpy as np import torch from torch.nn import functional as F ########## # Functions for IMDB demo notebook. # Data source: Stanford AI Lab https://ai.stanford.edu/~amaas/data/sentiment/ ########## # Output words instead of scores. def sentiment_score_to_name(score: float): if score > 0: return "Positive" elif score <= 0: return "Negative" # Split data into train, valid, test. def partition_dataset(df_input, new_columns, smoke_test=False): """Splits data, assuming original, input dataframe contains 50K rows. Args: df_input (pandas.DataFrame): input data frame smoke_test (boolean): if True, use smaller number of rows for testing Returns: df_train, df_val, df_test (pandas.DataFrame): train, valid, test splits. """ # Shuffle data and split into train/val/test. df_shuffled = df_input.sample(frac=1, random_state=1).reset_index() df_shuffled.columns = new_columns df_train = df_shuffled.iloc[:35_000] df_val = df_shuffled.iloc[35_000:40_000] df_test = df_shuffled.iloc[40_000:] # Save train/val/test split data locally in separate files. df_train.to_csv("train.csv", index=False, encoding="utf-8") df_val.to_csv("val.csv", index=False, encoding="utf-8") df_test.to_csv("test.csv", index=False, encoding="utf-8") return df_shuffled, df_train, df_val, df_test # Function for experimenting with chunk_size. def imdb_chunk_text(encoder, batch_size, df, chunk_size, chunk_overlap): batch = df.head(batch_size).copy() print(f"chunk size: {chunk_size}") print(f"original shape: {batch.shape}") start_time = time.time() # 1. Change primary key type to string. batch["movie_index"] = batch["movie_index"].apply(lambda x: str(x)) # 2. Split the documents into smaller chunks and add as new column to batch df. batch['chunk'] = batch['text'].apply(recursive_splitter_wrapper, chunk_size=chunk_size, chunk_overlap=chunk_overlap) # Explode the 'chunk' column to create new rows for each chunk. batch = batch.explode('chunk', ignore_index=True) print(f"new shape: {batch.shape}") # 3. 
Add embeddings as new column in df. review_embeddings = torch.tensor(encoder.encode(batch['chunk'])) # Normalize embeddings to unit length. review_embeddings = F.normalize(review_embeddings, p=2, dim=1) # Quick check if embeddings are normalized. norms = np.linalg.norm(review_embeddings, axis=1) assert np.allclose(norms, 1.0, atol=1e-5) == True # 4. Convert embeddings to list of `numpy.ndarray`, each containing `numpy.float32` numbers. converted_values = list(map(np.float32, review_embeddings)) batch['vector'] = converted_values # 5. Reorder columns for conveneince, so index first, labels at end. new_order = ["movie_index", "text", "chunk", "vector", "label_int", "label"] batch = batch[new_order] end_time = time.time() print(f"Chunking + embedding time for {batch_size} docs: {end_time - start_time} sec") # Inspect the batch of data. display(batch.head()) # assert len(batch.chunk[0]) <= MAX_SEQ_LENGTH-1 # assert len(batch.vector[0]) == EMBEDDING_LENGTH print(f"type embeddings: {type(batch.vector)} of {type(batch.vector[0])}") print(f"of numbers: {type(batch.vector[0][0])}") # Chunking looks good, drop the original text column. batch.drop(columns=["text"], inplace=True) return batch # Function for embedding a query. def embed_query(encoder, query): # Embed the query using same embedding model used to create the Milvus collection. query_embeddings = torch.tensor(encoder.encode(query)) # Normalize embeddings to unit length. query_embeddings = F.normalize(query_embeddings, p=2, dim=1) # Quick check if embeddings are normalized. norms = np.linalg.norm(query_embeddings, axis=1) assert np.allclose(norms, 1.0, atol=1e-5) == True # Convert the embeddings to list of list of np.float32. query_embeddings = list(map(np.float32, query_embeddings)) return query_embeddings ########## # Functions for LangChain chunking and embedding. 
########## from typing import List from langchain.text_splitter import RecursiveCharacterTextSplitter def recursive_splitter_wrapper(text, chunk_size, chunk_overlap): # Default chunk overlap is 10% chunk_size. chunk_overlap = np.round(chunk_size * 0.10, 0) # Use langchain's convenient recursive chunking method. text_splitter = RecursiveCharacterTextSplitter( chunk_size=chunk_size, chunk_overlap=chunk_overlap, length_function=len, ) chunks: List[str] = text_splitter.split_text(text) # Replace special characters with spaces. chunks = [text.replace("<br /><br />", " ") for text in chunks] return chunks ########## # Functions to process Milvus Client API responses. ########## def client_assemble_retrieved_context(retrieved_top_k, metadata_fields=[], num_shot_answers=3): """ For each question, assemble the context and metadata from the retrieved_top_k chunks. retrieved_top_k: list of dicts """ # Assemble the context as a stuffed string. distances = [] context = [] context_metadata = [] i = 1 for r in retrieved_top_k[0]: distances.append(r['distance']) if i <= num_shot_answers: if len(metadata_fields) > 0: metadata = {} for field in metadata_fields: metadata[field] = r['entity'][field] context_metadata.append(metadata) context.append(r['entity']['chunk']) i += 1 # Assemble formatted results in a zipped list. formatted_results = list(zip(distances, context, context_metadata)) # Return all the things for convenience. return formatted_results, context, context_metadata ########## # Functions to process Milvus Search API responses. ########## # Parse out the answer and context metadata from Milvus Search response. 
def assemble_answer_sources(answer, context_metadata): """Assemble the answer and grounding sources into a string""" grounded_answer = f"Answer: {answer}\n" grounded_answer += "Grounding sources and citations:\n" for metadata in context_metadata: try: grounded_answer += f"'h1': {metadata['h1']}, 'h2':{metadata['h2']}\n" except: pass try: grounded_answer += f"'source': {metadata['source']}" except: pass return grounded_answer # Stuff answers into a context string and stuff metadata into a list of dicts. def assemble_retrieved_context(retrieved_results, metadata_fields=[], num_shot_answers=3): # Assemble the context as a stuffed string. # Also save the context metadata to retrieve along with the answer. context = [] context_metadata = [] i = 1 for r in retrieved_results[0]: if i <= num_shot_answers: if len(metadata_fields) > 0: metadata = {} for field in metadata_fields: metadata[field] = getattr(r.entity, field, None) context_metadata.append(metadata) context.append(r.entity.text) i += 1 return context, context_metadata
[ "langchain.text_splitter.RecursiveCharacterTextSplitter" ]
[((1670, 1681), 'time.time', 'time.time', ([], {}), '()\n', (1679, 1681), False, 'import time\n'), ((2431, 2473), 'torch.nn.functional.normalize', 'F.normalize', (['review_embeddings'], {'p': '(2)', 'dim': '(1)'}), '(review_embeddings, p=2, dim=1)\n', (2442, 2473), True, 'from torch.nn import functional as F\n'), ((2534, 2575), 'numpy.linalg.norm', 'np.linalg.norm', (['review_embeddings'], {'axis': '(1)'}), '(review_embeddings, axis=1)\n', (2548, 2575), True, 'import numpy as np\n'), ((3031, 3042), 'time.time', 'time.time', ([], {}), '()\n', (3040, 3042), False, 'import time\n'), ((3838, 3879), 'torch.nn.functional.normalize', 'F.normalize', (['query_embeddings'], {'p': '(2)', 'dim': '(1)'}), '(query_embeddings, p=2, dim=1)\n', (3849, 3879), True, 'from torch.nn import functional as F\n'), ((3940, 3980), 'numpy.linalg.norm', 'np.linalg.norm', (['query_embeddings'], {'axis': '(1)'}), '(query_embeddings, axis=1)\n', (3954, 3980), True, 'import numpy as np\n'), ((4486, 4515), 'numpy.round', 'np.round', (['(chunk_size * 0.1)', '(0)'], {}), '(chunk_size * 0.1, 0)\n', (4494, 4515), True, 'import numpy as np\n'), ((4598, 4706), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap', 'length_function': 'len'}), '(chunk_size=chunk_size, chunk_overlap=\n chunk_overlap, length_function=len)\n', (4628, 4706), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2587, 2622), 'numpy.allclose', 'np.allclose', (['norms', '(1.0)'], {'atol': '(1e-05)'}), '(norms, 1.0, atol=1e-05)\n', (2598, 2622), True, 'import numpy as np\n'), ((3992, 4027), 'numpy.allclose', 'np.allclose', (['norms', '(1.0)'], {'atol': '(1e-05)'}), '(norms, 1.0, atol=1e-05)\n', (4003, 4027), True, 'import numpy as np\n')]
from langchain.chains import LLMChain from langchain_core.prompts import PromptTemplate from langchain_openai import ChatOpenAI from output_parsers import summary_parser, ice_breaker_parser, topics_of_interest_parser llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo") llm_creative = ChatOpenAI(temperature=1, model_name="gpt-3.5-turbo") def get_summary_chain() -> LLMChain: summary_template = """ given the information about a person from linkedin {information}, and twitter posts {twitter_posts} I want you to create: 1. a short summary 2. two interesting facts about them \n{format_instructions} """ summary_prompt_template = PromptTemplate( input_variables=["information", "twitter_posts"], template=summary_template, partial_variables={ "format_instructions": summary_parser.get_format_instructions() }, ) return LLMChain(llm=llm, prompt=summary_prompt_template) def get_interests_chain() -> LLMChain: interesting_facts_template = """ given the information about a person from linkedin {information}, and twitter posts {twitter_posts} I want you to create: 3 topics that might interest them \n{format_instructions} """ interesting_facts_prompt_template = PromptTemplate( input_variables=["information", "twitter_posts"], template=interesting_facts_template, partial_variables={ "format_instructions": topics_of_interest_parser.get_format_instructions() }, ) return LLMChain(llm=llm, prompt=interesting_facts_prompt_template) def get_ice_breaker_chain() -> LLMChain: ice_breaker_template = """ given the information about a person from linkedin {information}, and twitter posts {twitter_posts} I want you to create: 2 creative Ice breakers with them that are derived from their activity on Linkedin and twitter, preferably on latest tweets \n{format_instructions} """ ice_breaker_prompt_template = PromptTemplate( input_variables=["information", "twitter_posts"], template=ice_breaker_template, partial_variables={ "format_instructions": ice_breaker_parser.get_format_instructions() }, ) 
return LLMChain(llm=llm_creative, prompt=ice_breaker_prompt_template)
[ "langchain.chains.LLMChain", "langchain_openai.ChatOpenAI" ]
[((225, 278), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0, model_name='gpt-3.5-turbo')\n", (235, 278), False, 'from langchain_openai import ChatOpenAI\n'), ((294, 347), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(1)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=1, model_name='gpt-3.5-turbo')\n", (304, 347), False, 'from langchain_openai import ChatOpenAI\n'), ((933, 982), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'summary_prompt_template'}), '(llm=llm, prompt=summary_prompt_template)\n', (941, 982), False, 'from langchain.chains import LLMChain\n'), ((1580, 1639), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'interesting_facts_prompt_template'}), '(llm=llm, prompt=interesting_facts_prompt_template)\n', (1588, 1639), False, 'from langchain.chains import LLMChain\n'), ((2304, 2366), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm_creative', 'prompt': 'ice_breaker_prompt_template'}), '(llm=llm_creative, prompt=ice_breaker_prompt_template)\n', (2312, 2366), False, 'from langchain.chains import LLMChain\n'), ((863, 903), 'output_parsers.summary_parser.get_format_instructions', 'summary_parser.get_format_instructions', ([], {}), '()\n', (901, 903), False, 'from output_parsers import summary_parser, ice_breaker_parser, topics_of_interest_parser\n'), ((1499, 1550), 'output_parsers.topics_of_interest_parser.get_format_instructions', 'topics_of_interest_parser.get_format_instructions', ([], {}), '()\n', (1548, 1550), False, 'from output_parsers import summary_parser, ice_breaker_parser, topics_of_interest_parser\n'), ((2230, 2274), 'output_parsers.ice_breaker_parser.get_format_instructions', 'ice_breaker_parser.get_format_instructions', ([], {}), '()\n', (2272, 2274), False, 'from output_parsers import summary_parser, ice_breaker_parser, topics_of_interest_parser\n')]
# coding: UTF-8 import gc import glob import torch import time import os import json from collections import defaultdict from langchain.embeddings.huggingface import HuggingFaceEmbeddings from langchain.schema import Document from langchain.vectorstores import FAISS from tqdm import tqdm import config import re base_path = config.BASE_PATH shortname_allname_path = 'output/company/shortname_allname.json' shortname_allname = json.load(open(shortname_allname_path,"r")) text_path = "../../alltxt" output_path = "output" vector_path = "output/vector_bert" chinese_numbers = "一二三四五六七八九十" english_numbers = "123456789" pattern = f"^([{chinese_numbers}]+、|\([{chinese_numbers}]\)|([{chinese_numbers}])|[{english_numbers}]、|\([{english_numbers}]\)|([{english_numbers}])|[{english_numbers}].)" pattern_1 = f"^([{chinese_numbers}]+、)" pattern_2 = f"^(\([{chinese_numbers}]\)|([{chinese_numbers}])|[{english_numbers}]、|\([{english_numbers}]\)|([{english_numbers}])|[{english_numbers}].)" pattern2 = f"^([{chinese_numbers}]+、)" def docs_add_one(strs,doc_id,): result = [] metadata = {"source": f'doc_{doc_id}'} if isinstance(strs, str): result.append(Document(page_content=strs, metadata=metadata)) return result def split_context(all_content,end_page,start_allrow): docs = [] temp_dict2 = {} temp_dict2_new = {} for index,context in enumerate(all_content): context = eval(context) if not context: continue inside = context['inside'] page = context['page'] allrow = int(context['allrow']) if int(page) < end_page and int(allrow) >= start_allrow: # 在文本中查找所有匹配项 matches1 = re.findall(pattern_1, inside) matches2 = re.findall(pattern_2, inside) if len(inside)<30: if matches1: temp_dict2[inside] = all_content[index+1:index + 180] elif matches2: temp_dict2[inside] = all_content[index+1:index + 60] doc_id = 0 for key,values in temp_dict2.items(): strs = '' max_lengh = 1 if re.findall(pattern_1, key): max_lengh = 2200 elif re.findall(pattern_2, key): max_lengh = 800 for index_tmp,value in enumerate(values): 
value = eval(value) if not value: continue inside = value['inside'] text_type = value['type'] matches2 = re.findall(pattern2, inside) if (matches2 and index_tmp > 4) or (len(strs)>max_lengh): break if text_type == '页眉' or text_type == '页脚' or inside == '': continue strs += f'\n{inside}' if strs: new_key = re.sub(pattern,'', key) if new_key: temp_dict2_new[new_key] = strs # input_text = new_key input_text = new_key + ":\n" + strs single_vector = docs_add_one(input_text,doc_id) docs.extend(single_vector) doc_id += 1 return temp_dict2_new,docs def covert_data_vector_opening(company_year,embeddings): vector_store = [] temp_dict2 = {} if company_year in shortname_allname: file = shortname_allname[company_year] try: with open(os.path.join(base_path, file), "r", encoding='utf-8') as f: all_content = f.readlines() start_allrow = 3 end_page = 1000 for context in all_content: context = eval(context) if not context: continue inside = context['inside'] page = context['page'] allrow = int(context['allrow']) if int(page) <= 10: if inside == '一、公司信息': start_allrow = allrow end_page_list = re.findall(r'第.+节财务报告\.+(\d+)', inside) if end_page_list: end_page = int(end_page_list[0])+10 else: break temp_dict2,docs = split_context(all_content,end_page,start_allrow) vector_store = FAISS.from_documents(docs, embeddings) torch.cuda.empty_cache() gc.collect() except Exception as e: print(e) if vector_store and temp_dict2: # print(f"{company_year}向量导入成功。") return (vector_store,temp_dict2) else: return []
[ "langchain.vectorstores.FAISS.from_documents", "langchain.schema.Document" ]
[((2132, 2158), 're.findall', 're.findall', (['pattern_1', 'key'], {}), '(pattern_1, key)\n', (2142, 2158), False, 'import re\n'), ((1175, 1221), 'langchain.schema.Document', 'Document', ([], {'page_content': 'strs', 'metadata': 'metadata'}), '(page_content=strs, metadata=metadata)\n', (1183, 1221), False, 'from langchain.schema import Document\n'), ((1701, 1730), 're.findall', 're.findall', (['pattern_1', 'inside'], {}), '(pattern_1, inside)\n', (1711, 1730), False, 'import re\n'), ((1754, 1783), 're.findall', 're.findall', (['pattern_2', 'inside'], {}), '(pattern_2, inside)\n', (1764, 1783), False, 'import re\n'), ((2202, 2228), 're.findall', 're.findall', (['pattern_2', 'key'], {}), '(pattern_2, key)\n', (2212, 2228), False, 'import re\n'), ((2490, 2518), 're.findall', 're.findall', (['pattern2', 'inside'], {}), '(pattern2, inside)\n', (2500, 2518), False, 'import re\n'), ((2781, 2805), 're.sub', 're.sub', (['pattern', '""""""', 'key'], {}), "(pattern, '', key)\n", (2787, 2805), False, 'import re\n'), ((4234, 4272), 'langchain.vectorstores.FAISS.from_documents', 'FAISS.from_documents', (['docs', 'embeddings'], {}), '(docs, embeddings)\n', (4254, 4272), False, 'from langchain.vectorstores import FAISS\n'), ((4285, 4309), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (4307, 4309), False, 'import torch\n'), ((4322, 4334), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4332, 4334), False, 'import gc\n'), ((3353, 3382), 'os.path.join', 'os.path.join', (['base_path', 'file'], {}), '(base_path, file)\n', (3365, 3382), False, 'import os\n'), ((3942, 3982), 're.findall', 're.findall', (['"""第.+节财务报告\\\\.+(\\\\d+)"""', 'inside'], {}), "('第.+节财务报告\\\\.+(\\\\d+)', inside)\n", (3952, 3982), False, 'import re\n')]
# -*- coding: UTF-8 -*- """ @Project : AI-Vtuber @File : claude_model.py @Author : HildaM @Email : Hilda_quan@163.com @Date : 2023/06/17 下午 4:44 @Description : 本地向量数据库模型设置 """ from langchain.embeddings import HuggingFaceEmbeddings import os # 项目根路径 TEC2VEC_MODELS_PATH = os.getcwd() + "\\" + "data" + "\\" + "text2vec_models" "\\" # 默认模型 DEFAULT_MODEL_NAME = "sebastian-hofstaetter_distilbert-dot-tas_b-b256-msmarco" def get_default_model(): return HuggingFaceEmbeddings(model_name=TEC2VEC_MODELS_PATH + DEFAULT_MODEL_NAME) def get_text2vec_model(model_name): """ 0. 判空。若为空,加载内置模型 1. 先判断项目data/tec2vec_models目录中是否存在模型 2. 存在则直接加载 3. 不存在,则从Huggingface中下载到本地,保存在系统cache中 """ if model_name is None: return HuggingFaceEmbeddings(model_name=TEC2VEC_MODELS_PATH + DEFAULT_MODEL_NAME) model_path = TEC2VEC_MODELS_PATH + model_name if os.path.exists(model_path): return HuggingFaceEmbeddings(model_name=model_path) else: return HuggingFaceEmbeddings(model_name=model_name)
[ "langchain.embeddings.HuggingFaceEmbeddings" ]
[((468, 542), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '(TEC2VEC_MODELS_PATH + DEFAULT_MODEL_NAME)'}), '(model_name=TEC2VEC_MODELS_PATH + DEFAULT_MODEL_NAME)\n', (489, 542), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((908, 934), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (922, 934), False, 'import os\n'), ((775, 849), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '(TEC2VEC_MODELS_PATH + DEFAULT_MODEL_NAME)'}), '(model_name=TEC2VEC_MODELS_PATH + DEFAULT_MODEL_NAME)\n', (796, 849), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((951, 995), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': 'model_path'}), '(model_name=model_path)\n', (972, 995), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((1021, 1065), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (1042, 1065), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((283, 294), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (292, 294), False, 'import os\n')]
import datetime import json import pkgutil import time import uuid import os import copy from dataclasses import asdict import datasets as ds from cot.config import Config from cot.utils.schemas.cot import features as cot_features # disable transformation (e.g. map) caching # https://huggingface.co/docs/datasets/v2.6.1/en/package_reference/main_classes#datasets.disable_caching ds.disable_caching() FRAGMENTS = json.loads(pkgutil.get_data(__name__, "fragments.json")) def generate_and_extract(data, config): """ It takes a dataset and a config and generates cots for each example and extract answers. :param data: Dataset/DatasetDict - the dataset you want to generate CoTs for and extract answers :param config: Dictionary - the configurations of the input and model :return: the dataset with generated cots and extracted answers """ ds.disable_caching() data.cleanup_cache_files() if isinstance(data, ds.arrow_dataset.Dataset): features = data.info.features question_type = data[0]["type"] question_number_choices = len(data[0]["choices"]) elif isinstance(data, ds.dataset_dict.DatasetDict): name_of_first_split = list(data.keys())[0] features = data[name_of_first_split].info.features question_type = data[name_of_first_split][0]["type"] question_number_choices = len(data[name_of_first_split][0]["choices"]) else: raise ValueError("Not recognized data") # automated change of answer_extraction depending on the type of the task and the number of choices # if type str make list if isinstance(config["answer_extraction_keys"], str): config["answer_extraction_keys"] = [config["answer_extraction_keys"]] # make copy of config, so it is not changed permanently (but only for the current dataset), when auto-kojima is used: adaptive_config = config.copy() if adaptive_config["answer_extraction_keys"] == ["auto-kojima"]: adaptive_config["answer_extraction_keys"] = adaptive_answer_extraction("auto-kojima", question_type, question_number_choices) # The config is transformed into a dataclass 
object, where all testing is done # But it will be transformed back to a dictionary for the function 'map' config_as_dataclass = Config(**adaptive_config) return data.map( _generate_and_extract, with_indices=True, fn_kwargs=asdict(config_as_dataclass), features=features, load_from_cache_file=False, ) def _generate_and_extract( item, idx, # all of the following variables will be defined by the config_as_dataclass object idx_range, author, api_service, engine, temperature, max_tokens, api_time_interval, instruction_keys, cot_trigger_keys, template_cot_generation, answer_extraction_keys, template_answer_extraction, warn, verbose, ): """ The function takes in a JSON object (item) and generates a CoT (Chain-of-Thought) for each combination of of instructions and CoT triggers. For each generated CoT and for each of the given answer extractions it extracts an answer. :param item: the item (example) of a dataset to be processed :param idx: the index of the item in the dataset other parameters are handed over from config and are described in config.py :return: item populated with various fields """ if idx_range == "all" or (idx >= idx_range[0] and idx < idx_range[1]): pass else: return item # predefine values in template dictionary that stay same over all runs of the current item template_dict = { "instruction": None, "question": item["question"], "answer_choices": multiple_choice_answer_formatting(item["choices"]), "cot_trigger": None, "cot": None, "answer_extraction": None, } # try multiple times in case of API-Error additional_api_time = 0 number_of_tries = 5 for i in range(0, number_of_tries): try: # add additional time to api_time_interval if there was an error api_time_interval = api_time_interval + additional_api_time # generate chain of thoughts and extract answers for instruction_key in instruction_keys: template_dict["instruction"] = get_fragments_value("instructions", instruction_key) for cot_trigger_key in cot_trigger_keys: generated_cot = { "id": 
str(uuid.uuid4()), "fragments_version": FRAGMENTS["version"], "instruction": instruction_key, "cot_trigger": cot_trigger_key, "cot_trigger_template": template_cot_generation, "prompt_text": "", "cot": "", "answers": [], "author": author, "date": "", "api_service": api_service, "model": str( { "name": engine, "temperature": temperature, "max_tokens": max_tokens, } ), "comment": "", "annotations": [], } template_dict["cot_trigger"] = get_fragments_value("cot_triggers", cot_trigger_key) # change template_cot_generation to generated_cot["cot_trigger_template"] to make it more logical generate_cot_prompt = format_prompt(template_cot_generation, template_dict) if verbose: print("\n-----------------COT TRIGGER TEXT-----------------") print(generate_cot_prompt) cot = query_model( generate_cot_prompt, api_service, engine, temperature, max_tokens, api_time_interval, ) if verbose: print("\n------------------GENERATED COT-------------------") print(cot) template_dict["cot"] = cot generated_cot["cot"] = cot # deactivated automatic prompt text generation: (code line stays here for testing purposes) # generated_cot["prompt_text"] = generate_cot_prompt generated_cot["date"] = print_now(1) # extract answers from generated chain of thoughts for answer_extraction_key in answer_extraction_keys: if answer_extraction_key is None: pass else: answer = { "id": str(uuid.uuid4()), "answer_extraction": answer_extraction_key, "answer_extraction_template": template_answer_extraction, "answer_extraction_text": "", "answer": "", "answer_from_choices": "", "correct_answer": None, } template_dict["answer_extraction"] = get_fragments_value("answer_extractions", answer_extraction_key) answer_extraction_prompt = format_prompt(template_answer_extraction, template_dict) if verbose: print("\n----------------ANSWER EXTRACTION TEXT----------------") print(answer_extraction_prompt) predicted_answer = query_model( answer_extraction_prompt, api_service, engine, temperature, max_tokens, api_time_interval, ) if 
verbose: print("\n------------------EXTRACTED ANSWER-------------------") print(predicted_answer) answer["answer"] = predicted_answer # deactivated automatic prompt text generation: (code line stays here for testing purposes) # answer["answer_extraction_text"] = answer_extraction_prompt generated_cot["answers"].append(answer) item["generated_cot"].append(generated_cot) except Exception as ex: # if last try, raise error if i == number_of_tries - 1: raise ex # if not last try, add additional time to api_time_interval and try again additional_api_time += 10 print("(API-)Error in item " + str(idx) + ": " + str(ex)) print("Retrying with additional time of " + str(additional_api_time) + " seconds.") pass else: break return item def helper(data): ds.disable_caching() data.cleanup_cache_files() if isinstance(data, ds.arrow_dataset.Dataset): features = data.info.features elif isinstance(data, ds.dataset_dict.DatasetDict): name_of_first_split = list(data.keys())[0] features = data[name_of_first_split].info.features else: raise ValueError("Not recognized data") return data, features """ Input: item, langchains, triggers Output: cot and answer Generate a cot and extract an answer with helper function _self_generate_extract """ def self_generate_extract(data,input_dict): data, features = helper(data) return data.map( _self_generate_extract, with_indices=True, fn_kwargs=input_dict, features=features, load_from_cache_file=False, ) def _self_generate_extract(item,idx,input_dict): shadow_input_dict = copy.deepcopy(input_dict) chain = input_dict['chain'] del shadow_input_dict['chain'] shadow_input_dict['question'] = item["question"] shadow_input_dict['answer_choices'] = multiple_choice_answer_formatting(item["choices"]) #get cot and predicted answer lang_chain = chain(shadow_input_dict) generated_cot = { "id": str(uuid.uuid4()), "fragments_version": None, "instruction": input_dict['instruction'], "cot_trigger": input_dict['cot_trigger'], "cot_trigger_template": "", "prompt_text": 
"", "cot": lang_chain['cot'], "answers": [], "author": "", "date": "", "api_service": "", "model": str( { "name": input_dict['model'], "temperature": input_dict["temperature"], "max_tokens": input_dict["max_tokens"] } ), "comment": "generated and extracted", "annotations": [], } generated_cot["date"] = print_now(1) answer = { "id": str(uuid.uuid4()), "answer_extraction": input_dict['answer_extraction'], "answer_extraction_template": "", "answer_extraction_text": "", "answer": lang_chain['predicted_answer'], 'answer_from_choices':"", "correct_answer": None, } #add created answer and cot to item generated_cot["answers"].append(answer) item["generated_cot"].append(generated_cot) print(item) return item """Generate CoTs only""" def self_generate(data,input_dict): data, features = helper(data) return data.map( _self_generate, with_indices=True, fn_kwargs=input_dict, features=features, load_from_cache_file=False, ) def _self_generate(item,idx, input_dict): #feed data to input dict, isolate chain shadow_input_dict = copy.deepcopy(input_dict) chain = input_dict['chain'] del shadow_input_dict['chain'] shadow_input_dict['question'] = item["question"] shadow_input_dict['answer_choices'] = multiple_choice_answer_formatting(item["choices"]) #get cot and predicted answer lang_chain = chain(shadow_input_dict) """If conditions for input keys""" generated_cot = { "id": str(uuid.uuid4()), "fragments_version": "", "instruction": input_dict["instruction"], "cot_trigger": input_dict["cot_trigger"], "cot_trigger_template": "", "prompt_text": "", "cot": lang_chain['cot'], "answers": [], "author": "", "date": "", "api_service": input_dict["api_service"], "model": str( { "name": input_dict["model"], "temperature": input_dict["temperature"], "max_tokens": input_dict["max_tokens"] } ), "comment": "generated only", "annotations": [], } generated_cot["date"] = print_now(1) item["generated_cot"].append(generated_cot) return item """Extract answers based on CoTs only""" def 
self_extract(data,input_dict): data, features = helper(data) return data.map( _self_extract, with_indices=True, fn_kwargs=input_dict, features=features, load_from_cache_file=False, ) """ToDo show which CoT to take""" def _self_extract(item,idx,input_dict): #extract based on the first cot in the dataset, throw error otherwise if len(item['generated_cot'])>1: raise ValueError('Too many generated CoTs, only one allowed') else: cot = item['generated_cot'][0]['cot'] input_dict['cot'] = cot shadow_input_dict = copy.deepcopy(input_dict) chain = input_dict['chain'] del shadow_input_dict['chain'] shadow_input_dict['question'] = item["question"] shadow_input_dict['answer_choices'] = multiple_choice_answer_formatting(item["choices"]) #get cot and predicted answer lang_chain = chain(shadow_input_dict) """If conditions for input keys""" answer = { "id": str(uuid.uuid4()), "answer_extraction": input_dict['answer_extraction'], "answer_extraction_template": "", "answer_extraction_text": "", "answer": "", 'answer_from_choices':"", "correct_answer": None, } answer["answer"] = lang_chain['predicted_answer'] #we add the answer to the already existing generated cot # print(item['generated_cot'][0]["answers"]) # item['generated_cot'][0]["answers"].append(answer) # print("################") # print(item['generated_cot'][0]["answers"]) return item """Reflect on CoT (or some other part) and generate new answer""" def self_reflect(data, input_dict): data, features = helper(data) return data.map( _self_reflect, with_indices=True, fn_kwargs=input_dict, features=features, load_from_cache_file=False, ) """In this version the reflection is added to generated_cot""" def _self_reflect(item, idx, input_dict): #reflect based on the first cot in the dataset, throw error otherwise if len(item['generated_cot']) > 1: raise ValueError('Too many generated CoTs, only one allowed') else: input_dict['cot'] = item['generated_cot'][0]['cot'] shadow_input_dict = copy.deepcopy(input_dict) chain = 
input_dict['chain'] del shadow_input_dict['chain'] shadow_input_dict['question'] = item["question"] shadow_input_dict['answer_choices'] = multiple_choice_answer_formatting(item["choices"]) # here we take the first answer from the first cot shadow_input_dict['answer'] = item["generated_cot"][0]['answers'][0]['answer'] #this is where the magic happens lang_chain = chain(shadow_input_dict) #retrieve question and answer choices from item, add to input dict generated_cot = { "id": str(uuid.uuid4()), "fragments_version": "", "instruction": "", "cot_trigger": input_dict["reflection_prompt"], "cot_trigger_template": "", "prompt_text": "", "cot": lang_chain['reflection'], "answers": [], "author": "", "date": "", "api_service": input_dict["api_service"], "model": str( { "name": input_dict["model"], "temperature": input_dict["temperature"], "max_tokens": input_dict["max_tokens"], } ), "comment": "self_reflection cot", "annotations": [], } generated_cot["date"] = print_now(1) """If conditions for input keys""" answer = { "id": str(uuid.uuid4()), "answer_extraction": input_dict['reflect_answer_extraction'], "answer_extraction_template": "", "answer_extraction_text": "self_reflection", "answer": "", 'answer_from_choices':"", "correct_answer": None, } answer["answer"] = lang_chain['reflection_answer'] generated_cot["answers"].append(answer) item["generated_cot"].append(generated_cot) return item def full_text_prompts(dataset, prompt_text=True, answer_extraction_text=True): assert isinstance(dataset, ds.arrow_dataset.Dataset), "dataset must be an arrow dataset" dataset = dataset.map( _full_text_prompts, fn_kwargs={ "prompt_text": prompt_text, "answer_extraction_text": answer_extraction_text, }, features=dataset.info.features, load_from_cache_file=False, ) return dataset def _full_text_prompts(item, prompt_text, answer_extraction_text): # predefine values in template dictionary that stay same over all runs of the current item template_dict = { "instruction": None, "question": 
item["question"], "cot_trigger": None, "cot": None, "answer_extraction": None, } for generated_cot in item["generated_cot"]: answer_choices = (multiple_choice_answer_formatting(item["choices"]),) # function returns a tuple instead of a string # did not find out why it behaves differently here than in the _generate_and_extract function if type(answer_choices) == tuple: answer_choices = answer_choices[0] template_dict["answer_choices"] = answer_choices # generate chain of thoughts and extract answers # for instruction_key in instruction_keys: template_dict["instruction"] = get_fragments_value("instructions", generated_cot["instruction"]) template_dict["cot_trigger"] = get_fragments_value("cot_triggers", generated_cot["cot_trigger"]) generate_cot_prompt = format_prompt(generated_cot["cot_trigger_template"], template_dict) template_dict["cot"] = generated_cot["cot"] # Everything above could also be relevant for the answer extraction # now generating the full text for the chain of thoughts if prompt_text: generated_cot["prompt_text"] = generate_cot_prompt # if answer_extraction: ... if answer_extraction_text: # extract answers from generated chain of thoughts for answer in generated_cot["answers"]: if answer["answer_extraction"] is None: # if no answer extraction key is given, return item, since cot_prompt text is already generated return item else: template_dict["answer_extraction"] = get_fragments_value("answer_extractions", answer["answer_extraction"]) answer_extraction_prompt = format_prompt(answer["answer_extraction_template"], template_dict) answer["answer_extraction_text"] = answer_extraction_prompt return item def select_generated_cots(dataset, **kwargs): """This function handles which pregenerated CoTs are deleted (can be used after loading a collection with "load_pregenerated_cots=True"). :param dataset: The dataset to delete unwanted pregenerated CoTs from. 
:param kwargs: A dictionary of the form {"key": value}, where value has to be a string or list of strings. e.g. {"author": ["author1", "author2"]} or {"author": "author1"}. Overviews of current authors and their cot_triggers: "kojima": kojima-01 "wei": few-shot (as a prompt) "lievin": kojima-01, lievin-01, lievin-02, lievin-03, lievin-10 "lievin_100": 100 times kojima-01 with high temperature "thoughtsource": None, kojima-01 """ # general info why this function is necessary: # Unfortunately the loading function of the datasets does not let you specify which pregenerated COTS to load # So we load all of them and then delete the ones we don't want # disable progress bar ds.disable_progress_bar() # remove all the pregenerated COTS that are not in the list dataset = dataset.map( _select_generated_cots, fn_kwargs={**kwargs}, features=dataset.info.features, load_from_cache_file=False, ) return dataset # def _select_generated_cots(item, reverse=False, **kwargs): # # if reverse is True, unselect/delete all CoTs that match the given criteria # # load all allows keys from the cot_features # allowed_keys = list(cot_features["generated_cot"][0].keys()) + ["answer"] # for key, value in kwargs.items(): # # check if key is allowed # if key not in allowed_keys: # raise ValueError(f"Key '{key}' not in allowed keys {allowed_keys}") # # if value is None or a string, convert it to a list # if value is None or type(value) == str: # value = [value] # # loop over all generated CoTs in the item and delete the ones that don't match the given criteria # if key == "model": # if not reverse: # item["generated_cot"] = [cot for cot in item["generated_cot"] if eval(cot["model"])["name"] in value] # else: # item["generated_cot"] = [cot for cot in item["generated_cot"] if eval(cot["model"])["name"] not in value] # elif key == "answer": # if not reverse: # item["generated_cot"] = [cot for cot in item["generated_cot"] if cot["answers"][0]["correct_answer"] == value] # else: # item["generated_cot"] = 
[cot for cot in item["generated_cot"] if cot["answers"][0]["correct_answer"] != value] # else: # if not reverse: # item["generated_cot"] = [cot for cot in item["generated_cot"] if cot[str(key)] in value] # else: # item["generated_cot"] = [cot for cot in item["generated_cot"] if cot[str(key)] not in value] # return item def _select_generated_cots(item, reverse=False, **kwargs): allowed_keys = list(cot_features["generated_cot"][0].keys()) + ["answer"] filtered_cots = [] for key, value in kwargs.items(): if key not in allowed_keys: raise ValueError(f"Key '{key}' not in allowed keys {allowed_keys}") if value is None or type(value) == str: value = [value] if key == "model": cots = [cot for cot in item["generated_cot"] if eval(cot["model"])["name"] in value] elif key == "answer": cots = [cot for cot in item["generated_cot"] if cot["answers"][0]["correct_answer"] == value] else: cots = [cot for cot in item["generated_cot"] if cot[str(key)] in value] filtered_cots.append(cots) if reverse: # Flatten the list of filtered cots flattened_filtered_cots = [cot for sublist in filtered_cots for cot in sublist] # Remove duplicates from the flattened list unique_filtered_cots = list({id(cot): cot for cot in flattened_filtered_cots}.values()) # Remove the unique filtered cots from the original set item["generated_cot"] = [cot for cot in item["generated_cot"] if cot not in unique_filtered_cots] else: # Flatten the list of filtered cots flattened_filtered_cots = [cot for sublist in filtered_cots for cot in sublist] # Remove duplicates from the flattened list item["generated_cot"] = list({id(cot): cot for cot in flattened_filtered_cots}.values()) return item def delete_all_generated_cots(dataset): """This function deletes all pregenerated COTS from a dataset.""" dataset = dataset.map( _delete_all_generated_cots, features=dataset.info.features, load_from_cache_file=False, ) return dataset def _delete_all_generated_cots(item): item["generated_cot"] = [] return item def 
print_now(return_flag=0): """ It takes a flag as an argument and prints the current time in a specific format :param return_flag: 0 = print, 1 = return, defaults to 0 (optional) :return: the current time in the format of 'YYYY/MM/DD HH:MM:SS' """ now = datetime.datetime.now() now = now.strftime("%Y/%m/%d %H:%M:%S") if return_flag == 0: print(now) elif return_flag == 1: return now else: pass def multiple_choice_answer_formatting(answer_choices): """Transforms a list of answer choices into a string with letters (A,B,C,...) for each answer choice.""" # only supports uppercase letters at the moment, as this is current standard # Adding Letters (A,B,C,...) for the given multiple choice answers. return "\n".join([f"{chr(65+i)}) {example}" for i, example in enumerate(answer_choices)]) # 65 is the ASCII code for A def adaptive_answer_extraction(preference, type, len_choices): if preference == "auto-kojima": if type == "bool": return "kojima-yes-no" elif type == "multiplechoice": if len_choices == 3: answer_extraction_key = 'kojima-A-C' elif len_choices == 4: answer_extraction_key = 'kojima-A-D' elif len_choices == 5: answer_extraction_key = 'kojima-A-E' elif len_choices == 6: answer_extraction_key = 'kojima-A-F' return(answer_extraction_key) else: raise ValueError("type must be bool or multiplechoice") def get_fragments_value(str, key): if key is None: return None else: return FRAGMENTS[str][key] def format_prompt(template, dictionary): output = template.format_map(Correct_output(dictionary)) # remove leading whitespaces output = output.lstrip() return output class Correct_output(dict): # TODO: do I ever need this? 
I think there will never be missing keys # and None keys are handled by delete_empty_curly_brackets def __missing__(self, key): return "" def __getitem__(self, key): return dict.get(self, key) or "" # def get(self, key): # return dict.get(self, key) or "" # def delete_empty_curly_brackets(string): # string.replace("{None}\n", "") # # string.replace("\n{None}", "") # TODO: do I need this? # string.replace("{None}", "") # return string def query_model(input, api_service, engine, temperature, max_tokens, api_time_interval): if api_service == "mock_api": # time.sleep(api_time_interval) return " Test mock chain of thought." # return ("This is a " + 20 * "long " + "Mock CoT.\n")*20 # langchain package implementation else: from langchain import LLMChain, Prompt time.sleep(api_time_interval) template = "{prompt}" prompt = Prompt(template=template, input_variables=["prompt"]) if api_service == "openai": from langchain import OpenAI llm_chain = LLMChain( prompt=prompt, llm=OpenAI( # parameter options: https://beta.openai.com/docs/api-reference/completions/create-completion model_name=engine, max_tokens=max_tokens, temperature=temperature, # type: ignore (suppress pylance error) ), ) if api_service == "openai_chat": from langchain.chat_models import ChatOpenAI llm_chain = LLMChain( prompt=prompt, llm=ChatOpenAI( model_name=engine, max_tokens=max_tokens, temperature=temperature, # type: ignore (suppress pylance error) ), ) if api_service == "huggingface_hub": from langchain import HuggingFaceHub llm_chain = LLMChain( prompt=prompt, llm=HuggingFaceHub( # parameter options: https://huggingface.co/docs/api-inference/detailed_parameters repo_id=engine, model_kwargs={"temperature": temperature, "max_length": max_tokens}, # type: ignore (suppress pylance error) ), ) if api_service == "huggingface_endpoint": # from langchain.llms.huggingface_endpoint import HuggingFaceEndpoint llm_chain = LLMChain( prompt=prompt, llm=HuggingFaceEndpoint( # we just use the engine name as the endpoint 
url here endpoint_url=engine, # read API key from environment variable huggingfacehub_api_token=os.environ["HUGGINGFACEHUB_API_TOKEN"], model_kwargs={"temperature": temperature, "max_length": max_tokens}, task="text2text-generation" ), ) if api_service == "cohere": from langchain import Cohere llm_chain = LLMChain( prompt=prompt, llm=Cohere( model=engine, max_tokens=max_tokens, temperature=temperature, # type: ignore (suppress pylance error) ), ) response = llm_chain.predict(prompt=input, stop=None) return response ### this is code from the langchain package # I needed to make a small adaptation to the HuggingFaceEndpoint class to catch an Error # will be deleted in the future """Wrapper around HuggingFace APIs.""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import BaseModel, Extra, root_validator from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env VALID_TASKS = ("text2text-generation", "text-generation") class HuggingFaceEndpoint(LLM, BaseModel): """Wrapper around HuggingFaceHub Inference Endpoints. To use, you should have the ``huggingface_hub`` python package installed, and the environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. Only supports `text-generation` and `text2text-generation` for now. Example: .. code-block:: python from langchain import HuggingFaceEndpoint endpoint_url = ( "https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud" ) hf = HuggingFaceEndpoint( endpoint_url=endpoint_url, huggingfacehub_api_token="my-api-key" ) """ endpoint_url: str = "" """Endpoint URL to use.""" task: Optional[str] = None """Task to call the model with. 
Should be a task that returns `generated_text`.""" model_kwargs: Optional[dict] = None """Key word arguments to pass to the model.""" huggingfacehub_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" huggingfacehub_api_token = get_from_dict_or_env( values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN" ) try: from huggingface_hub.hf_api import HfApi try: HfApi( endpoint="https://huggingface.co", # Can be a Private Hub endpoint. token=huggingfacehub_api_token, ).whoami() except Exception as e: raise ValueError( "Could not authenticate with huggingface_hub. " "Please check your API token." ) from e except ImportError: raise ValueError( "Could not import huggingface_hub python package. " "Please it install it with `pip install huggingface_hub`." ) return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"endpoint_url": self.endpoint_url, "task": self.task}, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "huggingface_endpoint" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Call out to HuggingFace Hub's inference endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. 
code-block:: python response = hf("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} # payload samples parameter_payload = {"inputs": prompt, "parameters": _model_kwargs} # HTTP headers for authorization headers = { "Authorization": f"Bearer {self.huggingfacehub_api_token}", "Content-Type": "application/json", } # send request try: response = requests.post( self.endpoint_url, headers=headers, json=parameter_payload ) except requests.exceptions.RequestException as e: # This is the correct syntax raise ValueError(f"Error raised by inference endpoint: {e}") generated_text = response.json() if "error" in generated_text: raise ValueError(f"Error raised by inference API: {generated_text['error']}") if self.task == "text-generation": # Text generation return includes the starter text. text = generated_text[0]["generated_text"][len(prompt) :] elif self.task == "text2text-generation": text = generated_text[0]["generated_text"] else: raise ValueError( f"Got invalid task {self.task}, " f"currently only {VALID_TASKS} are supported" ) if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. text = enforce_stop_tokens(text, stop) return text
[ "langchain.chat_models.ChatOpenAI", "langchain.utils.get_from_dict_or_env", "langchain.Cohere", "langchain.Prompt", "langchain.llms.utils.enforce_stop_tokens", "langchain.OpenAI", "langchain.HuggingFaceHub" ]
[((383, 403), 'datasets.disable_caching', 'ds.disable_caching', ([], {}), '()\n', (401, 403), True, 'import datasets as ds\n'), ((428, 472), 'pkgutil.get_data', 'pkgutil.get_data', (['__name__', '"""fragments.json"""'], {}), "(__name__, 'fragments.json')\n", (444, 472), False, 'import pkgutil\n'), ((873, 893), 'datasets.disable_caching', 'ds.disable_caching', ([], {}), '()\n', (891, 893), True, 'import datasets as ds\n'), ((2303, 2328), 'cot.config.Config', 'Config', ([], {}), '(**adaptive_config)\n', (2309, 2328), False, 'from cot.config import Config\n'), ((9480, 9500), 'datasets.disable_caching', 'ds.disable_caching', ([], {}), '()\n', (9498, 9500), True, 'import datasets as ds\n'), ((10364, 10389), 'copy.deepcopy', 'copy.deepcopy', (['input_dict'], {}), '(input_dict)\n', (10377, 10389), False, 'import copy\n'), ((12563, 12588), 'copy.deepcopy', 'copy.deepcopy', (['input_dict'], {}), '(input_dict)\n', (12576, 12588), False, 'import copy\n'), ((14540, 14565), 'copy.deepcopy', 'copy.deepcopy', (['input_dict'], {}), '(input_dict)\n', (14553, 14565), False, 'import copy\n'), ((16328, 16353), 'copy.deepcopy', 'copy.deepcopy', (['input_dict'], {}), '(input_dict)\n', (16341, 16353), False, 'import copy\n'), ((21874, 21899), 'datasets.disable_progress_bar', 'ds.disable_progress_bar', ([], {}), '()\n', (21897, 21899), True, 'import datasets as ds\n'), ((26053, 26076), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (26074, 26076), False, 'import datetime\n'), ((32963, 32979), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (32977, 32979), False, 'from pydantic import BaseModel, Extra, root_validator\n'), ((28528, 28557), 'time.sleep', 'time.sleep', (['api_time_interval'], {}), '(api_time_interval)\n', (28538, 28557), False, 'import time\n'), ((28605, 28658), 'langchain.Prompt', 'Prompt', ([], {'template': 'template', 'input_variables': "['prompt']"}), "(template=template, input_variables=['prompt'])\n", (28611, 28658), False, 'from 
langchain import LLMChain, Prompt\n'), ((33150, 33238), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""huggingfacehub_api_token"""', '"""HUGGINGFACEHUB_API_TOKEN"""'], {}), "(values, 'huggingfacehub_api_token',\n 'HUGGINGFACEHUB_API_TOKEN')\n", (33170, 33238), False, 'from langchain.utils import get_from_dict_or_env\n'), ((2427, 2454), 'dataclasses.asdict', 'asdict', (['config_as_dataclass'], {}), '(config_as_dataclass)\n', (2433, 2454), False, 'from dataclasses import asdict\n'), ((10732, 10744), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (10742, 10744), False, 'import uuid\n'), ((11620, 11632), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (11630, 11632), False, 'import uuid\n'), ((12970, 12982), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (12980, 12982), False, 'import uuid\n'), ((14948, 14960), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (14958, 14960), False, 'import uuid\n'), ((16901, 16913), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (16911, 16913), False, 'import uuid\n'), ((17716, 17728), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (17726, 17728), False, 'import uuid\n'), ((35267, 35340), 'requests.post', 'requests.post', (['self.endpoint_url'], {'headers': 'headers', 'json': 'parameter_payload'}), '(self.endpoint_url, headers=headers, json=parameter_payload)\n', (35280, 35340), False, 'import requests\n'), ((36347, 36378), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (36366, 36378), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((28823, 28896), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': 'engine', 'max_tokens': 'max_tokens', 'temperature': 'temperature'}), '(model_name=engine, max_tokens=max_tokens, temperature=temperature)\n', (28829, 28896), False, 'from langchain import OpenAI\n'), ((29350, 29427), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'engine', 'max_tokens': 'max_tokens', 'temperature': 
'temperature'}), '(model_name=engine, max_tokens=max_tokens, temperature=temperature)\n', (29360, 29427), False, 'from langchain.chat_models import ChatOpenAI\n'), ((29763, 29866), 'langchain.HuggingFaceHub', 'HuggingFaceHub', ([], {'repo_id': 'engine', 'model_kwargs': "{'temperature': temperature, 'max_length': max_tokens}"}), "(repo_id=engine, model_kwargs={'temperature': temperature,\n 'max_length': max_tokens})\n", (29777, 29866), False, 'from langchain import HuggingFaceHub\n'), ((30912, 30980), 'langchain.Cohere', 'Cohere', ([], {'model': 'engine', 'max_tokens': 'max_tokens', 'temperature': 'temperature'}), '(model=engine, max_tokens=max_tokens, temperature=temperature)\n', (30918, 30980), False, 'from langchain import Cohere\n'), ((33357, 33429), 'huggingface_hub.hf_api.HfApi', 'HfApi', ([], {'endpoint': '"""https://huggingface.co"""', 'token': 'huggingfacehub_api_token'}), "(endpoint='https://huggingface.co', token=huggingfacehub_api_token)\n", (33362, 33429), False, 'from huggingface_hub.hf_api import HfApi\n'), ((4556, 4568), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4566, 4568), False, 'import uuid\n'), ((7088, 7100), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (7098, 7100), False, 'import uuid\n')]
import os import threading import time from contextlib import ExitStack from pathlib import Path from typing import cast, Optional import yaml from dotenv import load_dotenv from firebase_admin import auth from langchain.text_splitter import CharacterTextSplitter from llama_index import SimpleDirectoryReader from readerwriterlock import rwlock from realtime_ai_character.database.chroma import get_chroma from realtime_ai_character.database.connection import get_db from realtime_ai_character.logger import get_logger from realtime_ai_character.models.character import Character as CharacterModel from realtime_ai_character.utils import Character, Singleton load_dotenv() logger = get_logger(__name__) class CatalogManager(Singleton): def __init__(self): super().__init__() overwrite = os.getenv("OVERWRITE_CHROMA") != "false" # skip Chroma if Openai API key is not set if os.getenv("OPENAI_API_KEY"): self.db = get_chroma() else: self.db = get_chroma(embedding=False) overwrite = False logger.warning("OVERWRITE_CHROMA disabled due to OPENAI_API_KEY not set") self.sql_db = next(get_db()) self.sql_load_interval = 30 self.sql_load_lock = rwlock.RWLockFair() if overwrite: logger.info("Overwriting existing data in the chroma.") self.db.delete_collection() self.db = get_chroma() self.characters: dict[str, Character] = {} self.author_name_cache: dict[str, str] = {} self.load_characters("default", overwrite) self.load_characters("community", overwrite) if overwrite: logger.info("Persisting data in the chroma.") self.db.persist() logger.info(f"Total document load: {self.db._client.get_collection('llm').count()}") self.run_load_sql_db_thread = True self.load_sql_db_thread = threading.Thread(target=self.load_sql_db_loop) self.load_sql_db_thread.daemon = True self.load_sql_db_thread.start() def load_sql_db_loop(self): while self.run_load_sql_db_thread: self.load_character_from_sql_database() time.sleep(self.sql_load_interval) def stop_load_sql_db_loop(self): self.run_load_sql_db_thread = 
False def get_character(self, name) -> Optional[Character]: with self.sql_load_lock.gen_rlock(): return self.characters.get(name) def load_character(self, directory: Path, source: str): with ExitStack() as stack: f_yaml = stack.enter_context(open(directory / "config.yaml")) yaml_content = cast(dict, yaml.safe_load(f_yaml)) character_id = yaml_content["character_id"] character_name = yaml_content["character_name"] voice_id_env = os.getenv(character_id.upper() + "_VOICE_ID") voice_id = voice_id_env or str(yaml_content["voice_id"]) order = yaml_content.get("order", 10**6) self.characters[character_id] = Character( character_id=character_id, name=character_name, llm_system_prompt=yaml_content["system"], llm_user_prompt=yaml_content["user"], source=source, location="repo", voice_id=voice_id, author_name=yaml_content.get("author_name", ""), visibility="public" if source == "default" else yaml_content["visibility"], tts=yaml_content["text_to_speech_use"], order=order, # rebyte config rebyte_api_project_id=yaml_content["rebyte_api_project_id"], rebyte_api_agent_id=yaml_content["rebyte_api_agent_id"], rebyte_api_version=yaml_content.get("rebyte_api_version"), ) return character_name def load_data(self, character_name: str, data_path: Path): loader = SimpleDirectoryReader(data_path.absolute().as_posix()) documents = loader.load_data() text_splitter = CharacterTextSplitter(separator="\n", chunk_size=500, chunk_overlap=100) docs = text_splitter.create_documents( texts=[d.text for d in documents], metadatas=[ { "character_name": character_name, "id": d.id_, } for d in documents ], ) self.db.add_documents(docs) def load_characters(self, source: str, overwrite: bool): """ Load characters from the character_catalog directory. Use /data to create documents and add them to the chroma. :param source: 'default' or 'community' :param overwrite: if True, overwrite existing data in the chroma. 
""" if source == "default": path = Path(__file__).parent excluded_dirs = {"__pycache__", "archive", "community"} elif source == "community": path = Path(__file__).parent / "community" excluded_dirs = {"__pycache__", "archive"} else: raise ValueError(f"Invalid source: {source}") directories = [d for d in path.iterdir() if d.is_dir() and d.name not in excluded_dirs] for directory in directories: character_name = self.load_character(directory, source) if character_name and overwrite: logger.info("Overwriting data for character: " + character_name) self.load_data(character_name, directory / "data") logger.info(f"Loaded {len(self.characters)} characters: IDs {list(self.characters.keys())}") def load_character_from_sql_database(self): logger.info("Started loading characters from SQL database") character_models = self.sql_db.query(CharacterModel).all() with self.sql_load_lock.gen_wlock(): # delete all characters with location == 'database' keys_to_delete = [] for character_id in self.characters.keys(): if self.characters[character_id].location == "database": keys_to_delete.append(character_id) for key in keys_to_delete: del self.characters[key] # add all characters from sql database for character_model in character_models: if character_model.author_id not in self.author_name_cache: author_name = ( auth.get_user(character_model.author_id).display_name if os.getenv("USE_AUTH") == "true" else "anonymous author" ) self.author_name_cache[character_model.author_id] = author_name # type: ignore else: author_name = self.author_name_cache[character_model.author_id] character = Character( character_id=character_model.id, # type: ignore name=character_model.name, # type: ignore llm_system_prompt=character_model.system_prompt, # type: ignore llm_user_prompt=character_model.user_prompt, # type: ignore source="community", location="database", voice_id=character_model.voice_id, # type: ignore author_name=author_name, author_id=character_model.author_id, # type: ignore 
visibility=character_model.visibility, # type: ignore tts=character_model.tts, # type: ignore data=character_model.data, # type: ignore # rebyte config rebyte_api_project_id=character_model.rebyte_api_project_id, # type: ignore rebyte_api_agent_id=character_model.rebyte_api_agent_id, # type: ignore rebyte_api_version=character_model.rebyte_api_version, # type: ignore ) self.characters[character_model.id] = character # type: ignore # TODO: load context data from storage logger.info(f"Loaded {len(character_models)} characters from sql database") def get_catalog_manager() -> CatalogManager: return CatalogManager.get_instance() if __name__ == "__main__": manager = CatalogManager.get_instance()
[ "langchain.text_splitter.CharacterTextSplitter" ]
[((664, 677), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (675, 677), False, 'from dotenv import load_dotenv\n'), ((687, 707), 'realtime_ai_character.logger.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (697, 707), False, 'from realtime_ai_character.logger import get_logger\n'), ((917, 944), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (926, 944), False, 'import os\n'), ((1263, 1282), 'readerwriterlock.rwlock.RWLockFair', 'rwlock.RWLockFair', ([], {}), '()\n', (1280, 1282), False, 'from readerwriterlock import rwlock\n'), ((1937, 1983), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.load_sql_db_loop'}), '(target=self.load_sql_db_loop)\n', (1953, 1983), False, 'import threading\n'), ((4113, 4185), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '"""\n"""', 'chunk_size': '(500)', 'chunk_overlap': '(100)'}), "(separator='\\n', chunk_size=500, chunk_overlap=100)\n", (4134, 4185), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((814, 843), 'os.getenv', 'os.getenv', (['"""OVERWRITE_CHROMA"""'], {}), "('OVERWRITE_CHROMA')\n", (823, 843), False, 'import os\n'), ((968, 980), 'realtime_ai_character.database.chroma.get_chroma', 'get_chroma', ([], {}), '()\n', (978, 980), False, 'from realtime_ai_character.database.chroma import get_chroma\n'), ((1017, 1044), 'realtime_ai_character.database.chroma.get_chroma', 'get_chroma', ([], {'embedding': '(False)'}), '(embedding=False)\n', (1027, 1044), False, 'from realtime_ai_character.database.chroma import get_chroma\n'), ((1188, 1196), 'realtime_ai_character.database.connection.get_db', 'get_db', ([], {}), '()\n', (1194, 1196), False, 'from realtime_ai_character.database.connection import get_db\n'), ((1436, 1448), 'realtime_ai_character.database.chroma.get_chroma', 'get_chroma', ([], {}), '()\n', (1446, 1448), False, 'from realtime_ai_character.database.chroma import get_chroma\n'), 
((2210, 2244), 'time.sleep', 'time.sleep', (['self.sql_load_interval'], {}), '(self.sql_load_interval)\n', (2220, 2244), False, 'import time\n'), ((2550, 2561), 'contextlib.ExitStack', 'ExitStack', ([], {}), '()\n', (2559, 2561), False, 'from contextlib import ExitStack\n'), ((2684, 2706), 'yaml.safe_load', 'yaml.safe_load', (['f_yaml'], {}), '(f_yaml)\n', (2698, 2706), False, 'import yaml\n'), ((4912, 4926), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (4916, 4926), False, 'from pathlib import Path\n'), ((6968, 7571), 'realtime_ai_character.utils.Character', 'Character', ([], {'character_id': 'character_model.id', 'name': 'character_model.name', 'llm_system_prompt': 'character_model.system_prompt', 'llm_user_prompt': 'character_model.user_prompt', 'source': '"""community"""', 'location': '"""database"""', 'voice_id': 'character_model.voice_id', 'author_name': 'author_name', 'author_id': 'character_model.author_id', 'visibility': 'character_model.visibility', 'tts': 'character_model.tts', 'data': 'character_model.data', 'rebyte_api_project_id': 'character_model.rebyte_api_project_id', 'rebyte_api_agent_id': 'character_model.rebyte_api_agent_id', 'rebyte_api_version': 'character_model.rebyte_api_version'}), "(character_id=character_model.id, name=character_model.name,\n llm_system_prompt=character_model.system_prompt, llm_user_prompt=\n character_model.user_prompt, source='community', location='database',\n voice_id=character_model.voice_id, author_name=author_name, author_id=\n character_model.author_id, visibility=character_model.visibility, tts=\n character_model.tts, data=character_model.data, rebyte_api_project_id=\n character_model.rebyte_api_project_id, rebyte_api_agent_id=\n character_model.rebyte_api_agent_id, rebyte_api_version=character_model\n .rebyte_api_version)\n", (6977, 7571), False, 'from realtime_ai_character.utils import Character, Singleton\n'), ((5057, 5071), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (5061, 
5071), False, 'from pathlib import Path\n'), ((6632, 6653), 'os.getenv', 'os.getenv', (['"""USE_AUTH"""'], {}), "('USE_AUTH')\n", (6641, 6653), False, 'import os\n'), ((6551, 6591), 'firebase_admin.auth.get_user', 'auth.get_user', (['character_model.author_id'], {}), '(character_model.author_id)\n', (6564, 6591), False, 'from firebase_admin import auth\n')]
import sys from dotenv import load_dotenv from langchain.agents import load_tools from langchain.agents import initialize_agent from langchain.llms import OpenAI from commands import chrome_click_on_link, chrome_get_the_links_on_the_page, chrome_open_url, chrome_read_the_page, computer_applescript_action, say_text # load environment variables load_dotenv() def main(command): llm = OpenAI(temperature=0) tools = [ computer_applescript_action, chrome_open_url, chrome_get_the_links_on_the_page, chrome_click_on_link, chrome_read_the_page ] agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True) result = agent.run(command) if result: say_text(f'The result is {result}') else: say_text(f'Finished doing {command}') if __name__ == "__main__": command = sys.argv[1] if not command: print("Please provide a command to execute e.g. python main.py 'Open the calculator app'") exit(1) main(command)
[ "langchain.llms.OpenAI", "langchain.agents.initialize_agent" ]
[((350, 363), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (361, 363), False, 'from dotenv import load_dotenv\n'), ((394, 415), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (400, 415), False, 'from langchain.llms import OpenAI\n'), ((613, 692), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': '"""zero-shot-react-description"""', 'verbose': '(True)'}), "(tools, llm, agent='zero-shot-react-description', verbose=True)\n", (629, 692), False, 'from langchain.agents import initialize_agent\n'), ((750, 785), 'commands.say_text', 'say_text', (['f"""The result is {result}"""'], {}), "(f'The result is {result}')\n", (758, 785), False, 'from commands import chrome_click_on_link, chrome_get_the_links_on_the_page, chrome_open_url, chrome_read_the_page, computer_applescript_action, say_text\n'), ((804, 841), 'commands.say_text', 'say_text', (['f"""Finished doing {command}"""'], {}), "(f'Finished doing {command}')\n", (812, 841), False, 'from commands import chrome_click_on_link, chrome_get_the_links_on_the_page, chrome_open_url, chrome_read_the_page, computer_applescript_action, say_text\n')]
import os from typing import Optional from langchain import LLMChain, OpenAI, PromptTemplate from langchain.chains.base import Chain from langchain.chains.conversation.memory import ConversationBufferMemory from langchain.llms.base import BaseLLM from langchain.llms.loading import load_llm DEFAULT_LLM = None # Default template, no memory TEMPLATE = """ You are working with a pandas dataframe in Python. The name of the dataframe is `df`. The dataframe has the following columns: {df_columns}. You should execute code as commanded to either provide information to answer the question or to do the transformations required. You should not assign any variables; you should return a one-liner in Pandas. This is your objective: {query} Go! ```python print(df.head()) ``` ```output {df_head} ``` ```python""" PROMPT = PromptTemplate(template=TEMPLATE, input_variables=["query", "df_head", "df_columns"]) # Template with memory # TODO: add result of expected code to memory; currently we only remember what code was run. TEMPLATE_WITH_MEMORY = """ You are working with a pandas dataframe in Python. The name of the dataframe is `df`. The dataframe has the following columns: {df_columns}. You are interacting with a programmer. The programmer issues commands and you should translate them into Python code and execute them. This is the history of your interaction so far: {chat_history} Human: {query} Go! 
```python df.head() ``` ```output {df_head} ``` ```python """ PROMPT_WITH_MEMORY = PromptTemplate( template=TEMPLATE_WITH_MEMORY, input_variables=["chat_history", "query", "df_head", "df_columns"] ) def set_llm(llm: BaseLLM) -> None: global DEFAULT_LLM DEFAULT_LLM = llm def get_chain(llm: Optional[BaseLLM] = None, use_memory: bool = True) -> Chain: """Get chain to use.""" if llm is None: if DEFAULT_LLM is None: llm_config_path = os.environ.get("LLPANDAS_LLM_CONFIGURATION") if llm_config_path is None: llm = OpenAI(temperature=0) else: llm = load_llm(llm_config_path) else: llm = DEFAULT_LLM if use_memory: memory = ConversationBufferMemory(memory_key="chat_history", input_key="query") chain = LLMChain(llm=llm, prompt=PROMPT_WITH_MEMORY, memory=memory) else: chain = LLMChain(llm=llm, prompt=PROMPT) return chain
[ "langchain.PromptTemplate", "langchain.chains.conversation.memory.ConversationBufferMemory", "langchain.llms.loading.load_llm", "langchain.LLMChain", "langchain.OpenAI" ]
[((825, 914), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'TEMPLATE', 'input_variables': "['query', 'df_head', 'df_columns']"}), "(template=TEMPLATE, input_variables=['query', 'df_head',\n 'df_columns'])\n", (839, 914), False, 'from langchain import LLMChain, OpenAI, PromptTemplate\n'), ((1501, 1619), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'TEMPLATE_WITH_MEMORY', 'input_variables': "['chat_history', 'query', 'df_head', 'df_columns']"}), "(template=TEMPLATE_WITH_MEMORY, input_variables=[\n 'chat_history', 'query', 'df_head', 'df_columns'])\n", (1515, 1619), False, 'from langchain import LLMChain, OpenAI, PromptTemplate\n'), ((2171, 2241), 'langchain.chains.conversation.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'input_key': '"""query"""'}), "(memory_key='chat_history', input_key='query')\n", (2195, 2241), False, 'from langchain.chains.conversation.memory import ConversationBufferMemory\n'), ((2258, 2317), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'PROMPT_WITH_MEMORY', 'memory': 'memory'}), '(llm=llm, prompt=PROMPT_WITH_MEMORY, memory=memory)\n', (2266, 2317), False, 'from langchain import LLMChain, OpenAI, PromptTemplate\n'), ((2344, 2376), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'PROMPT'}), '(llm=llm, prompt=PROMPT)\n', (2352, 2376), False, 'from langchain import LLMChain, OpenAI, PromptTemplate\n'), ((1895, 1939), 'os.environ.get', 'os.environ.get', (['"""LLPANDAS_LLM_CONFIGURATION"""'], {}), "('LLPANDAS_LLM_CONFIGURATION')\n", (1909, 1939), False, 'import os\n'), ((2002, 2023), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (2008, 2023), False, 'from langchain import LLMChain, OpenAI, PromptTemplate\n'), ((2064, 2089), 'langchain.llms.loading.load_llm', 'load_llm', (['llm_config_path'], {}), '(llm_config_path)\n', (2072, 2089), False, 'from langchain.llms.loading import 
load_llm\n')]
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Project : AI. @by PyCharm # @File : promptwatch # @Time : 2023/7/13 10:03 # @Author : betterme # @WeChat : meutils # @Software : PyCharm # @Description : import os from meutils.pipe import * from langchain import OpenAI, LLMChain, PromptTemplate from promptwatch import PromptWatch, register_prompt_template prompt_template = PromptTemplate.from_template("这是个prompt: {input}") prompt_template = register_prompt_template("name_of_your_template", prompt_template) my_chain = LLMChain(llm=OpenAI(streaming=True), prompt=prompt_template) with PromptWatch(api_key=os.getenv('PROMPT_WATCH_API_KEY')) as pw: my_chain("1+1=")
[ "langchain.PromptTemplate.from_template", "langchain.OpenAI" ]
[((417, 467), 'langchain.PromptTemplate.from_template', 'PromptTemplate.from_template', (['"""这是个prompt: {input}"""'], {}), "('这是个prompt: {input}')\n", (445, 467), False, 'from langchain import OpenAI, LLMChain, PromptTemplate\n'), ((486, 552), 'promptwatch.register_prompt_template', 'register_prompt_template', (['"""name_of_your_template"""', 'prompt_template'], {}), "('name_of_your_template', prompt_template)\n", (510, 552), False, 'from promptwatch import PromptWatch, register_prompt_template\n'), ((577, 599), 'langchain.OpenAI', 'OpenAI', ([], {'streaming': '(True)'}), '(streaming=True)\n', (583, 599), False, 'from langchain import OpenAI, LLMChain, PromptTemplate\n'), ((651, 684), 'os.getenv', 'os.getenv', (['"""PROMPT_WATCH_API_KEY"""'], {}), "('PROMPT_WATCH_API_KEY')\n", (660, 684), False, 'import os\n')]
import sys from typing import Any import readline from langchain.chat_models import ChatOpenAI from langchain.memory import ConversationBufferMemory import colorama from callbacks import handlers from config import config from i18n import text from utils import utils from agent.agent import create_agent from walrus.toolkit import WalrusToolKit from k8s.toolkit import KubernetesToolKit last_error = None def setup_agent() -> Any: config.init() colorama.init() llm = ChatOpenAI( model_name="gpt-4", temperature=0, callbacks=[handlers.PrintReasoningCallbackHandler()], ) text.init_system_messages(llm) memory = ConversationBufferMemory(memory_key="chat_history") enabled_toolkits = [ toolkit.lower() for toolkit in config.APPILOT_CONFIG.toolkits ] tools = [] if "kubernetes" in enabled_toolkits: kubernetes_toolkit = KubernetesToolKit(llm=llm) tools.extend(kubernetes_toolkit.get_tools()) elif "walrus" in enabled_toolkits: walrus_toolkit = WalrusToolKit(llm=llm) tools.extend(walrus_toolkit.get_tools()) else: print(text.get("enable_no_toolkit")) sys.exit(1) return create_agent( llm, shared_memory=memory, tools=tools, verbose=config.APPILOT_CONFIG.verbose, ) def run(): appilot_agent = setup_agent() print(text.get("welcome")) user_query = None while True: user_query = input(">") if utils.is_inform_sent(): continue elif user_query == "exit": break elif user_query == "appilot_log": print_last_error() continue elif user_query.startswith("#"): continue elif not user_query.strip(): continue try: result = appilot_agent.run(user_query) except handlers.HumanRejectedException as he: utils.print_rejected_message() continue except Exception as e: handle_exception(e) continue utils.print_ai_response(result) def handle_exception(e): global last_error print(text.get("response_prefix"), end="") print(text.get("error_occur_message")) last_error = e def print_last_error(): if last_error is None: print(text.get("response_prefix"), end="") print(text.get("no_error_message")) else: print(last_error)
[ "langchain.memory.ConversationBufferMemory" ]
[((442, 455), 'config.config.init', 'config.init', ([], {}), '()\n', (453, 455), False, 'from config import config\n'), ((460, 475), 'colorama.init', 'colorama.init', ([], {}), '()\n', (473, 475), False, 'import colorama\n'), ((623, 653), 'i18n.text.init_system_messages', 'text.init_system_messages', (['llm'], {}), '(llm)\n', (648, 653), False, 'from i18n import text\n'), ((668, 719), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""'}), "(memory_key='chat_history')\n", (692, 719), False, 'from langchain.memory import ConversationBufferMemory\n'), ((1211, 1307), 'agent.agent.create_agent', 'create_agent', (['llm'], {'shared_memory': 'memory', 'tools': 'tools', 'verbose': 'config.APPILOT_CONFIG.verbose'}), '(llm, shared_memory=memory, tools=tools, verbose=config.\n APPILOT_CONFIG.verbose)\n', (1223, 1307), False, 'from agent.agent import create_agent\n'), ((908, 934), 'k8s.toolkit.KubernetesToolKit', 'KubernetesToolKit', ([], {'llm': 'llm'}), '(llm=llm)\n', (925, 934), False, 'from k8s.toolkit import KubernetesToolKit\n'), ((1400, 1419), 'i18n.text.get', 'text.get', (['"""welcome"""'], {}), "('welcome')\n", (1408, 1419), False, 'from i18n import text\n'), ((1502, 1524), 'utils.utils.is_inform_sent', 'utils.is_inform_sent', ([], {}), '()\n', (1522, 1524), False, 'from utils import utils\n'), ((2090, 2121), 'utils.utils.print_ai_response', 'utils.print_ai_response', (['result'], {}), '(result)\n', (2113, 2121), False, 'from utils import utils\n'), ((2181, 2208), 'i18n.text.get', 'text.get', (['"""response_prefix"""'], {}), "('response_prefix')\n", (2189, 2208), False, 'from i18n import text\n'), ((2228, 2259), 'i18n.text.get', 'text.get', (['"""error_occur_message"""'], {}), "('error_occur_message')\n", (2236, 2259), False, 'from i18n import text\n'), ((1052, 1074), 'walrus.toolkit.WalrusToolKit', 'WalrusToolKit', ([], {'llm': 'llm'}), '(llm=llm)\n', (1065, 1074), False, 'from walrus.toolkit import 
WalrusToolKit\n'), ((1187, 1198), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1195, 1198), False, 'import sys\n'), ((2347, 2374), 'i18n.text.get', 'text.get', (['"""response_prefix"""'], {}), "('response_prefix')\n", (2355, 2374), False, 'from i18n import text\n'), ((2398, 2426), 'i18n.text.get', 'text.get', (['"""no_error_message"""'], {}), "('no_error_message')\n", (2406, 2426), False, 'from i18n import text\n'), ((569, 609), 'callbacks.handlers.PrintReasoningCallbackHandler', 'handlers.PrintReasoningCallbackHandler', ([], {}), '()\n', (607, 609), False, 'from callbacks import handlers\n'), ((1148, 1177), 'i18n.text.get', 'text.get', (['"""enable_no_toolkit"""'], {}), "('enable_no_toolkit')\n", (1156, 1177), False, 'from i18n import text\n'), ((1945, 1975), 'utils.utils.print_rejected_message', 'utils.print_rejected_message', ([], {}), '()\n', (1973, 1975), False, 'from utils import utils\n')]
"""Wrapper around Cohere APIs.""" from __future__ import annotations import logging from typing import Any, Callable, Dict, List, Optional from pydantic import Extra, root_validator from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def _create_retry_decorator(llm: Cohere) -> Callable[[Any], Any]: import cohere min_seconds = 4 max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards return retry( reraise=True, stop=stop_after_attempt(llm.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=(retry_if_exception_type(cohere.error.CohereError)), before_sleep=before_sleep_log(logger, logging.WARNING), ) def completion_with_retry(llm: Cohere, **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator def _completion_with_retry(**kwargs: Any) -> Any: return llm.client.generate(**kwargs) return _completion_with_retry(**kwargs) class Cohere(LLM): """Wrapper around Cohere large language models. To use, you should have the ``cohere`` python package installed, and the environment variable ``COHERE_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. 
code-block:: python from langchain.llms import Cohere cohere = Cohere(model="gptd-instruct-tft", cohere_api_key="my-api-key") """ client: Any #: :meta private: model: Optional[str] = None """Model name to use.""" max_tokens: int = 256 """Denotes the number of tokens to predict per generation.""" temperature: float = 0.75 """A non-negative float that tunes the degree of randomness in generation.""" k: int = 0 """Number of most likely tokens to consider at each step.""" p: int = 1 """Total probability mass of tokens to consider at each step.""" frequency_penalty: float = 0.0 """Penalizes repeated tokens according to frequency. Between 0 and 1.""" presence_penalty: float = 0.0 """Penalizes repeated tokens. Between 0 and 1.""" truncate: Optional[str] = None """Specify how the client handles inputs longer than the maximum token length: Truncate from START, END or NONE""" max_retries: int = 10 """Maximum number of retries to make when generating.""" cohere_api_key: Optional[str] = None stop: Optional[List[str]] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" cohere_api_key = get_from_dict_or_env( values, "cohere_api_key", "COHERE_API_KEY" ) try: import cohere values["client"] = cohere.Client(cohere_api_key) except ImportError: raise ImportError( "Could not import cohere python package. " "Please install it with `pip install cohere`." 
) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Cohere API.""" return { "max_tokens": self.max_tokens, "temperature": self.temperature, "k": self.k, "p": self.p, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "truncate": self.truncate, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "cohere" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to Cohere's generate endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = cohere("Tell me a joke.") """ params = self._default_params if self.stop is not None and stop is not None: raise ValueError("`stop` found in both the input and default params.") elif self.stop is not None: params["stop_sequences"] = self.stop else: params["stop_sequences"] = stop response = completion_with_retry( self, model=self.model, prompt=prompt, **params ) text = response.generations[0].text # If stop tokens are provided, Cohere's endpoint returns them. # In order to make this consistent with other endpoints, we strip them. if stop is not None or self.stop is not None: text = enforce_stop_tokens(text, params["stop_sequences"]) return text
[ "langchain.llms.utils.enforce_stop_tokens", "langchain.utils.get_from_dict_or_env" ]
[((531, 558), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (548, 558), False, 'import logging\n'), ((3018, 3034), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3032, 3034), False, 'from pydantic import Extra, root_validator\n'), ((3195, 3259), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""cohere_api_key"""', '"""COHERE_API_KEY"""'], {}), "(values, 'cohere_api_key', 'COHERE_API_KEY')\n", (3215, 3259), False, 'from langchain.utils import get_from_dict_or_env\n'), ((866, 901), 'tenacity.stop_after_attempt', 'stop_after_attempt', (['llm.max_retries'], {}), '(llm.max_retries)\n', (884, 901), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((916, 980), 'tenacity.wait_exponential', 'wait_exponential', ([], {'multiplier': '(1)', 'min': 'min_seconds', 'max': 'max_seconds'}), '(multiplier=1, min=min_seconds, max=max_seconds)\n', (932, 980), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((997, 1046), 'tenacity.retry_if_exception_type', 'retry_if_exception_type', (['cohere.error.CohereError'], {}), '(cohere.error.CohereError)\n', (1020, 1046), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((1070, 1111), 'tenacity.before_sleep_log', 'before_sleep_log', (['logger', 'logging.WARNING'], {}), '(logger, logging.WARNING)\n', (1086, 1111), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((3353, 3382), 'cohere.Client', 'cohere.Client', (['cohere_api_key'], {}), '(cohere_api_key)\n', (3366, 3382), False, 'import cohere\n'), ((5575, 5626), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', "params['stop_sequences']"], {}), "(text, params['stop_sequences'])\n", (5594, 5626), False, 'from 
langchain.llms.utils import enforce_stop_tokens\n')]
"""Wrapper around GooseAI API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) class GooseAI(LLM): """Wrapper around OpenAI large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``GOOSEAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import GooseAI gooseai = GooseAI(model_name="gpt-neo-20b") """ client: Any model_name: str = "gpt-neo-20b" """Model name to use""" temperature: float = 0.7 """What sampling temperature to use""" max_tokens: int = 256 """The maximum number of tokens to generate in the completion. 
-1 returns as many tokens as possible given the prompt and the models maximal context size.""" top_p: float = 1 """Total probability mass of tokens to consider at each step.""" min_tokens: int = 1 """The minimum number of tokens to generate in the completion.""" frequency_penalty: float = 0 """Penalizes repeated tokens according to frequency.""" presence_penalty: float = 0 """Penalizes repeated tokens.""" n: int = 1 """How many completions to generate for each prompt.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict) """Adjust the probability of specific tokens being generated.""" gooseai_api_key: Optional[str] = None class Config: """Configuration for this pydantic config.""" extra = Extra.ignore @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transfered to model_kwargs. 
Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" gooseai_api_key = get_from_dict_or_env( values, "gooseai_api_key", "GOOSEAI_API_KEY" ) try: import openai openai.api_key = gooseai_api_key openai.api_base = "https://api.goose.ai/v1" values["client"] = openai.Completion except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling GooseAI API.""" normal_params = { "temperature": self.temperature, "max_tokens": self.max_tokens, "top_p": self.top_p, "min_tokens": self.min_tokens, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "n": self.n, "logit_bias": self.logit_bias, } return {**normal_params, **self.model_kwargs} @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "gooseai" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call the GooseAI API.""" params = self._default_params if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop response = self.client.create(engine=self.model_name, prompt=prompt, **params) text = response.choices[0].text return text
[ "langchain.utils.get_from_dict_or_env" ]
[((315, 342), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (332, 342), False, 'import logging\n'), ((1675, 1702), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1680, 1702), False, 'from pydantic import Extra, Field, root_validator\n'), ((1836, 1863), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1841, 1863), False, 'from pydantic import Extra, Field, root_validator\n'), ((2085, 2109), 'pydantic.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (2099, 2109), False, 'from pydantic import Extra, Field, root_validator\n'), ((2995, 3011), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3009, 3011), False, 'from pydantic import Extra, Field, root_validator\n'), ((3173, 3239), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""gooseai_api_key"""', '"""GOOSEAI_API_KEY"""'], {}), "(values, 'gooseai_api_key', 'GOOSEAI_API_KEY')\n", (3193, 3239), False, 'from langchain.utils import get_from_dict_or_env\n')]
"""Wrapper around Anyscale""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env class Anyscale(LLM): """Wrapper around Anyscale Services. To use, you should have the environment variable ``ANYSCALE_SERVICE_URL``, ``ANYSCALE_SERVICE_ROUTE`` and ``ANYSCALE_SERVICE_TOKEN`` set with your Anyscale Service, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import Anyscale anyscale = Anyscale(anyscale_service_url="SERVICE_URL", anyscale_service_route="SERVICE_ROUTE", anyscale_service_token="SERVICE_TOKEN") # Use Ray for distributed processing import ray prompt_list=[] @ray.remote def send_query(llm, prompt): resp = llm(prompt) return resp futures = [send_query.remote(anyscale, prompt) for prompt in prompt_list] results = ray.get(futures) """ model_kwargs: Optional[dict] = None """Key word arguments to pass to the model. 
Reserved for future use""" anyscale_service_url: Optional[str] = None anyscale_service_route: Optional[str] = None anyscale_service_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" anyscale_service_url = get_from_dict_or_env( values, "anyscale_service_url", "ANYSCALE_SERVICE_URL" ) anyscale_service_route = get_from_dict_or_env( values, "anyscale_service_route", "ANYSCALE_SERVICE_ROUTE" ) anyscale_service_token = get_from_dict_or_env( values, "anyscale_service_token", "ANYSCALE_SERVICE_TOKEN" ) try: anyscale_service_endpoint = f"{anyscale_service_url}/-/route" headers = {"Authorization": f"Bearer {anyscale_service_token}"} requests.get(anyscale_service_endpoint, headers=headers) except requests.exceptions.RequestException as e: raise ValueError(e) values["anyscale_service_url"] = anyscale_service_url values["anyscale_service_route"] = anyscale_service_route values["anyscale_service_token"] = anyscale_service_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "anyscale_service_url": self.anyscale_service_url, "anyscale_service_route": self.anyscale_service_route, } @property def _llm_type(self) -> str: """Return type of llm.""" return "anyscale" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to Anyscale Service endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. 
code-block:: python response = anyscale("Tell me a joke.") """ anyscale_service_endpoint = ( f"{self.anyscale_service_url}/{self.anyscale_service_route}" ) headers = {"Authorization": f"Bearer {self.anyscale_service_token}"} body = {"prompt": prompt} resp = requests.post(anyscale_service_endpoint, headers=headers, json=body) if resp.status_code != 200: raise ValueError( f"Error returned by service, status code {resp.status_code}" ) text = resp.text if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. text = enforce_stop_tokens(text, stop) return text
[ "langchain.llms.utils.enforce_stop_tokens", "langchain.utils.get_from_dict_or_env" ]
[((1679, 1695), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1693, 1695), False, 'from pydantic import Extra, root_validator\n'), ((1862, 1938), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_url"""', '"""ANYSCALE_SERVICE_URL"""'], {}), "(values, 'anyscale_service_url', 'ANYSCALE_SERVICE_URL')\n", (1882, 1938), False, 'from langchain.utils import get_from_dict_or_env\n'), ((1994, 2079), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_route"""', '"""ANYSCALE_SERVICE_ROUTE"""'], {}), "(values, 'anyscale_service_route', 'ANYSCALE_SERVICE_ROUTE'\n )\n", (2014, 2079), False, 'from langchain.utils import get_from_dict_or_env\n'), ((2130, 2215), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_token"""', '"""ANYSCALE_SERVICE_TOKEN"""'], {}), "(values, 'anyscale_service_token', 'ANYSCALE_SERVICE_TOKEN'\n )\n", (2150, 2215), False, 'from langchain.utils import get_from_dict_or_env\n'), ((3943, 4011), 'requests.post', 'requests.post', (['anyscale_service_endpoint'], {'headers': 'headers', 'json': 'body'}), '(anyscale_service_endpoint, headers=headers, json=body)\n', (3956, 4011), False, 'import requests\n'), ((2408, 2464), 'requests.get', 'requests.get', (['anyscale_service_endpoint'], {'headers': 'headers'}), '(anyscale_service_endpoint, headers=headers)\n', (2420, 2464), False, 'import requests\n'), ((4390, 4421), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (4409, 4421), False, 'from langchain.llms.utils import enforce_stop_tokens\n')]
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Project : AI. @by PyCharm # @File : chatpicture # @Time : 2023/8/23 13:56 # @Author : betterme # @WeChat : meutils # @Software : PyCharm # @Description : 增加代理 根据意图选择 OCR类型 from meutils.pipe import * from meutils.ai_cv.ocr_api import OCR class ChatPicture(object): def __init__(self): pass if __name__ == '__main__': img = Path("/Users/betterme/PycharmProjects/AI/aizoo/aizoo/api/港澳台通行证.webp").read_bytes() print(OCR.basic_accurate(img)) from langchain.chat_models import ChatOpenAI from langchain.chains import LLMChain from chatllm.llmchain.prompts.prompt_templates import CHAT_CONTEXT_PROMPT llm = ChatOpenAI() prompt = CHAT_CONTEXT_PROMPT context = json.dumps(OCR.basic_accurate(img), ensure_ascii=False) # c = LLMChain(llm=llm, prompt=prompt) # print(c.run(context=context, question="出生日期是?")) print(context)
[ "langchain.chat_models.ChatOpenAI" ]
[((732, 744), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (742, 744), False, 'from langchain.chat_models import ChatOpenAI\n'), ((526, 549), 'meutils.ai_cv.ocr_api.OCR.basic_accurate', 'OCR.basic_accurate', (['img'], {}), '(img)\n', (544, 549), False, 'from meutils.ai_cv.ocr_api import OCR\n'), ((804, 827), 'meutils.ai_cv.ocr_api.OCR.basic_accurate', 'OCR.basic_accurate', (['img'], {}), '(img)\n', (822, 827), False, 'from meutils.ai_cv.ocr_api import OCR\n')]
from typing import List from pydantic import BaseModel, Field from langchain.agents import AgentExecutor, Tool from langchain.llms.base import BaseLLM from .agent.base import AutonomousAgent class ExecutionAgent(BaseModel): agent: AgentExecutor = Field(...) @classmethod def from_llm(cls, llm: BaseLLM, objective: str, tools: List[Tool], verbose: bool = True) -> "ExecutionAgent": agent = AutonomousAgent.from_llm_and_tools(llm=llm, tools=tools, objective=objective, verbose=verbose) agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=verbose) return cls(agent=agent_executor) def execute_task(self, task: str, context: str) -> str: for i in range(3): try: return self.agent.run({"input": task, "context": context}) except ValueError: print(f"Value error running executor agent. Will retry {2-i} times") return "Failed to execute task."
[ "langchain.agents.AgentExecutor.from_agent_and_tools" ]
[((254, 264), 'pydantic.Field', 'Field', (['...'], {}), '(...)\n', (259, 264), False, 'from pydantic import BaseModel, Field\n'), ((533, 610), 'langchain.agents.AgentExecutor.from_agent_and_tools', 'AgentExecutor.from_agent_and_tools', ([], {'agent': 'agent', 'tools': 'tools', 'verbose': 'verbose'}), '(agent=agent, tools=tools, verbose=verbose)\n', (567, 610), False, 'from langchain.agents import AgentExecutor, Tool\n')]
# process_text.py from lib.chat.setup import openai_embeddings from langchain.docstore.document import Document from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.vectorstores import Chroma from langchain.document_loaders.csv_loader import CSVLoader import requests import json import chardet def read_input_files(file_paths): combined_articles = [] for file_path in file_paths: content = read_file(file_path) combined_articles.append(content) return combined_articles def split_texts(all_combined_articles): all_texts = [] text_splitter = RecursiveCharacterTextSplitter( chunk_size=4096, chunk_overlap=256, length_function=len ) for combined_articles in all_combined_articles: all_texts.append(text_splitter.split_text(combined_articles)) return all_texts def create_documents(all_texts, prefixes): all_docs = [] for texts, prefix in zip(all_texts, prefixes): metadatas = [{"SOURCES": f"{prefix}{i}"} for i in range(len(texts))] docs = [Document(id=f'{prefix}{i}', page_content=texts[i], metadata=metadatas[i]) for i in range(len(texts))] all_docs.extend(docs) return all_docs def get_fieldnames_from_csv(csv_path): with open(csv_path, newline="", encoding="utf-8") as csvfile: fieldnames = csvfile.readline().strip().split(';') return fieldnames def documents_from_csv(csv_path,fieldnames): loader = CSVLoader(file_path=csv_path, csv_args={ 'delimiter': ';', 'quotechar': '"', 'fieldnames': fieldnames }) docs = loader.load() return docs def create_vectordb_from_docs(docs, persist_directory=None): vectordb = Chroma.from_documents(documents=docs, embedding=openai_embeddings, persist_directory=persist_directory) if persist_directory: vectordb.persist() return vectordb def load_persisted_chromadb(persist_directory): vectordb = Chroma(persist_directory=persist_directory, embedding_function=openai_embeddings) return vectordb def read_file(file_path): with open(file_path, 'rb') as file: raw_data = file.read() detected_encoding = 
chardet.detect(raw_data)['encoding'] content = raw_data.decode(detected_encoding, errors='ignore') return content def write_file(file_path, content): with open(file_path, 'w', encoding='utf-8') as file: file.write(content) def split_text(text, max_size=16 * 1024): words = text.split(' ') chunks = [] current_chunk = [] for word in words: if len(' '.join(current_chunk)) + len(word) < max_size: current_chunk.append(word) else: chunks.append(' '.join(current_chunk)) current_chunk = [word] if current_chunk: chunks.append(' '.join(current_chunk)) return chunks def translate_text(text, api_key, source_lang='EL', target_lang='EN'): url = 'https://api-free.deepl.com/v2/translate' headers = {'Authorization': f'DeepL-Auth-Key {api_key}'} data = {'text': text, 'source_lang': source_lang, 'target_lang': target_lang} response = requests.post(url, headers=headers, data=data) try: translations = json.loads(response.text)['translations'] return translations[0]['text'] except json.JSONDecodeError: print(f"Error: status code {response.status_code}, response content: {response.text}") return "" def translate_documents(file_paths, api_key, source_lang, target_lang): translated_files = [] for file_path in file_paths: text = read_file(file_path) chunks = split_text(text) translated_chunks = [translate_text(chunk, api_key, source_lang, target_lang) for chunk in chunks] translated_text = ' '.join(translated_chunks) translated_file_path = f"{file_path[:-4]}_translated.txt" write_file(translated_file_path, translated_text) translated_files.append(translated_file_path) return translated_files
[ "langchain.vectorstores.Chroma.from_documents", "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.document_loaders.csv_loader.CSVLoader", "langchain.docstore.document.Document", "langchain.vectorstores.Chroma" ]
[((612, 703), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(4096)', 'chunk_overlap': '(256)', 'length_function': 'len'}), '(chunk_size=4096, chunk_overlap=256,\n length_function=len)\n', (642, 703), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1649, 1755), 'langchain.document_loaders.csv_loader.CSVLoader', 'CSVLoader', ([], {'file_path': 'csv_path', 'csv_args': '{\'delimiter\': \';\', \'quotechar\': \'"\', \'fieldnames\': fieldnames}'}), '(file_path=csv_path, csv_args={\'delimiter\': \';\', \'quotechar\': \'"\',\n \'fieldnames\': fieldnames})\n', (1658, 1755), False, 'from langchain.document_loaders.csv_loader import CSVLoader\n'), ((1992, 2099), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', ([], {'documents': 'docs', 'embedding': 'openai_embeddings', 'persist_directory': 'persist_directory'}), '(documents=docs, embedding=openai_embeddings,\n persist_directory=persist_directory)\n', (2013, 2099), False, 'from langchain.vectorstores import Chroma\n'), ((2234, 2320), 'langchain.vectorstores.Chroma', 'Chroma', ([], {'persist_directory': 'persist_directory', 'embedding_function': 'openai_embeddings'}), '(persist_directory=persist_directory, embedding_function=\n openai_embeddings)\n', (2240, 2320), False, 'from langchain.vectorstores import Chroma\n'), ((3434, 3480), 'requests.post', 'requests.post', (['url'], {'headers': 'headers', 'data': 'data'}), '(url, headers=headers, data=data)\n', (3447, 3480), False, 'import requests\n'), ((2481, 2505), 'chardet.detect', 'chardet.detect', (['raw_data'], {}), '(raw_data)\n', (2495, 2505), False, 'import chardet\n'), ((1251, 1324), 'langchain.docstore.document.Document', 'Document', ([], {'id': 'f"""{prefix}{i}"""', 'page_content': 'texts[i]', 'metadata': 'metadatas[i]'}), "(id=f'{prefix}{i}', page_content=texts[i], metadata=metadatas[i])\n", (1259, 1324), False, 'from langchain.docstore.document 
import Document\n'), ((3518, 3543), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (3528, 3543), False, 'import json\n')]
import re from typing import List from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter from langchain.docstore.document import Document as LCDocument class MarkDownSplitter(TextSplitter): '''To split markdown''' def split_text(self, text: str) -> List[str]: if self.count_token(text) < self._chunk_size: texts = [text] else: lines = text.split('\n') new_lines = self.remove_long_code(lines) markdown_splitter = RecursiveCharacterTextSplitter(chunk_size=self._chunk_size, chunk_overlap=0, length_function=self.count_token) documents = markdown_splitter.create_documents(['\n'.join(new_lines)]) texts = self._keep_parent_title(documents) return texts def remove_long_code(self, lines: List[str]) -> List[str]: new_lines = [] code = '' is_code = False for line in lines: line = self._strip_line(line) if line.startswith('```'): is_code = not is_code if is_code or line.startswith('```'): code = code + line else: if len(code) > 0 and self.count_token(code) <= self._chunk_size: new_lines.append(code) new_lines.append(line) code = '' return new_lines def _keep_parent_title(self, documents: List[LCDocument]) -> List[str]: docs = [] now_title_stack = [] for doc_chunk in documents: new_chunk = [] lines = doc_chunk.page_content.split('\n') for inner_idx, line in enumerate(lines): if line.strip() == '': continue if self._is_title(line): now_head_level = self._get_level(line) last_level_in_stack = self._get_last_level(now_title_stack) while now_head_level <= last_level_in_stack: now_title_stack.pop() last_level_in_stack = self._get_last_level(now_title_stack) now_title_stack.append(line) if inner_idx == 0 and line.strip() != '': new_chunk.extend(now_title_stack) if not self._is_title(line): new_chunk.append(line) else: new_chunk.append(line) docs.append('\n'.join(new_chunk)) return docs def count_token(self, doc): # todo # doc_len = len(re.findall(r'\w+', doc)) + len(re.findall(r'[^\w\s]', doc)) + len(re.findall(r'\n', doc)) // 4 doc_len = len(doc) // 3 return 
doc_len def _strip_line(self, l): l = re.sub(r'<(.*?)>', '', l) return l.lstrip() def _get_last_level(self, now_title_stack): if len(now_title_stack) > 0: return self._get_level(now_title_stack[-1]) else: return 0 def _get_level(self, line): return len(re.findall('#', line.split(' ')[0])) def _is_title(self, line): return line.split('# ')[0].replace('#', '') == ''
[ "langchain.text_splitter.RecursiveCharacterTextSplitter" ]
[((2824, 2848), 're.sub', 're.sub', (['"""<(.*?)>"""', '""""""', 'l'], {}), "('<(.*?)>', '', l)\n", (2830, 2848), False, 'import re\n'), ((514, 628), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': 'self._chunk_size', 'chunk_overlap': '(0)', 'length_function': 'self.count_token'}), '(chunk_size=self._chunk_size, chunk_overlap=0,\n length_function=self.count_token)\n', (544, 628), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter\n')]
import os import json from dotenv import load_dotenv from langchain.agents import Tool from langchain.chat_models import ChatOpenAI from ai.ai_functions import get_company_info, get_intro_response from consts import company_handbook_faiss_path, llm_model_type, demo_company_name from utils import calculate_vesting # Load .env variables load_dotenv() # LLM Initialization openai_api_key = os.getenv("OPENAI_API_KEY") llm = ChatOpenAI(max_retries=3, temperature=0, # type: ignore model_name=llm_model_type) def tool_describe_skills(): """ This function creates a LangChain agent's tool that uses a generic LLM chain to introduce itself and give user suggestions for what it can do. """ return Tool(name="Introduction", func=lambda query: get_intro_response(query), description=f"""useful for questions like 'what can I ask', 'what can you do', 'what else can you do', 'what can I ask you', 'can you suggest some things I can ask'. Action Input is the user's direct query.""", return_direct=True) # type: ignore def tool_retrieve_company_info(): """ This function creates a LangChain agent's tool that uses QARetrieval chain to retrieve information from the company handbook based on a FAISS vectorstore. """ return Tool(name="Company Guidelines", func=lambda query: get_company_info(user_reply=query, index_path=company_handbook_faiss_path), description=f"""useful for questions about {demo_company_name}'s polices, work from home, IT, CEO, product, meeting conduct, diversity and inclusion (DEI), career progression, management tips, sales process, HR, team events, 1 on 1 guidelines, coaching, onboarding tips. Pass user's response directly to this tool""", return_direct=True) # type: ignore def tool_calculate_stock_options(): """ This function takes the user's starting date, their total amount of shares, and their vesting schedules and returns the number of shares they have vested so far along with shares that hasn't vested. 
""" return Tool(name="Stock Options/Shares calculator", func=lambda query: calculate_vesting(start_date=json.loads(query)["start_date"], total_shares=json.loads(query)["total_shares"], vesting_schedule=json.loads(query)["vesting_schedule"]), description=f"""useful for when asked about stock options and share calculations. Action Input should be a JSON object of a start_date (YYYY-MM-DD), total_shares and vesting_schedule (tuple of decimal numbers) keys.""", return_direct=False) # type: ignore
[ "langchain.chat_models.ChatOpenAI" ]
[((339, 352), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (350, 352), False, 'from dotenv import load_dotenv\n'), ((393, 420), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (402, 420), False, 'import os\n'), ((427, 494), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'max_retries': '(3)', 'temperature': '(0)', 'model_name': 'llm_model_type'}), '(max_retries=3, temperature=0, model_name=llm_model_type)\n', (437, 494), False, 'from langchain.chat_models import ChatOpenAI\n'), ((778, 803), 'ai.ai_functions.get_intro_response', 'get_intro_response', (['query'], {}), '(query)\n', (796, 803), False, 'from ai.ai_functions import get_company_info, get_intro_response\n'), ((1325, 1399), 'ai.ai_functions.get_company_info', 'get_company_info', ([], {'user_reply': 'query', 'index_path': 'company_handbook_faiss_path'}), '(user_reply=query, index_path=company_handbook_faiss_path)\n', (1341, 1399), False, 'from ai.ai_functions import get_company_info, get_intro_response\n'), ((2117, 2134), 'json.loads', 'json.loads', (['query'], {}), '(query)\n', (2127, 2134), False, 'import json\n'), ((2163, 2180), 'json.loads', 'json.loads', (['query'], {}), '(query)\n', (2173, 2180), False, 'import json\n'), ((2215, 2232), 'json.loads', 'json.loads', (['query'], {}), '(query)\n', (2225, 2232), False, 'import json\n')]
from langchain.prompts import PromptTemplate _symptom_extract_template = """Consider the following conversation patient note: Patient note: {note} Choose on of the symptoms to be the chief complaint (it is usually the first symptom mentioned). Provide your response strictly in the following format, replacing only the name_of_chief_complaint (keeping : yes), and refrain from including any additional text: <symptom> name_of_chief_complaint </symptom> """ _symptom_match_template = """Given the symptom: {symptom} which of the following retrievals is the best match? Retrievals: {retrievals} Select only one and write it below in the following format: <match> choice </match> Remember, do not include any other text, ensure your choice is in the provided retrievals, and follow the output format. """ CC_EXTRACT_PROMPT = PromptTemplate.from_template(_symptom_extract_template) CC_MATCH_PROMPT = PromptTemplate.from_template(_symptom_match_template)
[ "langchain.prompts.PromptTemplate.from_template" ]
[((830, 885), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['_symptom_extract_template'], {}), '(_symptom_extract_template)\n', (858, 885), False, 'from langchain.prompts import PromptTemplate\n'), ((904, 957), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['_symptom_match_template'], {}), '(_symptom_match_template)\n', (932, 957), False, 'from langchain.prompts import PromptTemplate\n')]
import re import string import traceback from collections import Counter import numpy as np import pandas as pd import tqdm from langchain.evaluation.qa import QAEvalChain from langchain.llms import OpenAI from algos.PWS import PWS_Base, PWS_Extra from algos.notool import CoT, IO from algos.react import ReactBase from nodes import LLMNode import openai def normalize_answer(s): def remove_articles(text): return re.sub(r"\b(a|an|the)\b", " ", text) def white_space_fix(text): return " ".join(text.split()) def remove_punc(text): exclude = set(string.punctuation) return "".join(ch for ch in text if ch not in exclude) def lower(text): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(s)))) def f1_score(prediction, ground_truth): normalized_prediction = normalize_answer(prediction) normalized_ground_truth = normalize_answer(ground_truth) if normalized_prediction in ['yes', 'no', 'noanswer'] and normalized_prediction != normalized_ground_truth: return 0 if normalized_ground_truth in ['yes', 'no', 'noanswer'] and normalized_prediction != normalized_ground_truth: return 0 prediction_tokens = normalized_prediction.split() ground_truth_tokens = normalized_ground_truth.split() common = Counter(prediction_tokens) & Counter(ground_truth_tokens) num_same = sum(common.values()) if num_same == 0: return 0 precision = 1.0 * num_same / len(prediction_tokens) recall = 1.0 * num_same / len(ground_truth_tokens) f1 = (2 * precision * recall) / (precision + recall) return f1 def llm_accuracy_score(query, prediction, ground_truth): data = [{ 'query': query, 'answer': ground_truth, }] pred = [{ 'query': query, 'answer': ground_truth, 'result': prediction, }] eval_chain = QAEvalChain.from_llm(OpenAI( temperature=0, )) graded_outputs = eval_chain.evaluate(data, pred) return 1 if graded_outputs[0]['text'].strip() == 'CORRECT' else 0 class Evaluator: def __init__(self, task, dataset, algo, maxtry=3): assert task in ["hotpot_qa", "trivia_qa", "gsm8k", 
"physics_question", "disfl_qa", "sports_understanding", "strategy_qa", "sotu_qa"] assert isinstance(dataset, pd.DataFrame) assert isinstance(algo, (PWS_Base, PWS_Extra, ReactBase, IO, CoT)) self.task = task self.dataset = dataset self.algo = algo self.maxtry = maxtry self.failed_response = self._failed_response() self.eval_data = self._initialize_eval_dict() def run(self): print("\n******************* Start Evaluation *******************\n") if self.task in ["hotpot_qa", "sotu_qa"]: for i in tqdm.tqdm(range(len(self.dataset))): question = self.dataset["question"][i] label = self.dataset["answer"][i] for _ in range(self.maxtry): try: response = self.algo.run(question) break except Exception: traceback.print_exc() response = self.failed_response self._update_eval_dict(question, label, response) elif self.task == "fever": for i in tqdm.tqdm(range(len(self.dataset))): question = self.dataset["claim"][i] label = self.dataset["label"][i] for _ in range(self.maxtry): try: response = self.algo.run(question) break except: response = self.failed_response self._update_eval_dict(question, label, response) elif self.task == "trivia_qa": for i in tqdm.tqdm(range(len(self.dataset))): question = self.dataset["question"][i] label = self.dataset["answer"][i]["value"] for _ in range(self.maxtry): try: response = self.algo.run(question) break except: response = self.failed_response self._update_eval_dict(question, label, response) elif self.task == "gsm8k": for i in tqdm.tqdm(range(len(self.dataset))): question = self.dataset["question"][i] label = self.dataset["answer"][i].split("#### ")[1] for _ in range(self.maxtry): try: response = self.algo.run(question) break except: response = self.failed_response self._update_eval_dict(question, label, response) elif self.task in ["physics_question", "sports_understanding", "strategy_qa"]: for i in tqdm.tqdm(range(len(self.dataset))): question = self.dataset["input"][i] label = self.dataset["target"][i] for _ in range(self.maxtry): try: 
response = self.algo.run(question) break except: response = self.failed_response self._update_eval_dict(question, label, response) else: raise NotImplementedError return self._get_avg_results(), self.eval_data def _initialize_eval_dict(self): data = {} for d in ["label", "preds", "em", "f1", "acc", "wall_time", "total_tokens", "total_cost", "steps", "token_cost", "tool_cost", "planner_log", "solver_log"]: data[d] = [] return data def _update_eval_dict(self, question, label, response): print("=== Planner ===" + '\n\n' + response.get("planner_log", '') + '\n' + "=== Solver ===" + '\n\n' + response.get("solver_log", '')) pred = self._parse_prediction(response["output"]) self.eval_data["label"] += [label] self.eval_data["preds"] += [pred] self.eval_data["em"] += [self.get_metrics(question, label, pred)["em"]] self.eval_data["f1"] += [self.get_metrics(question, label, pred)["f1"]] self.eval_data["acc"] += [self.get_metrics(question, label, pred)["acc"]] self.eval_data["wall_time"] += [response["wall_time"]] self.eval_data["total_tokens"] += [response["total_tokens"]] self.eval_data["total_cost"] += [response["total_cost"]] self.eval_data["steps"] += [response["steps"]] self.eval_data["token_cost"] += [response["token_cost"]] self.eval_data["tool_cost"] += [response["tool_cost"]] LLMNode.refresh('succ' if self.get_metrics(question, label, pred)["acc"] else 'fail') if "planner_log" in response: self.eval_data["planner_log"] += [response["planner_log"]] if "solver_log" in response: self.eval_data["solver_log"] += [response["solver_log"]] def _get_avg_results(self): result = {} result["avg_em"] = np.nanmean(self.eval_data["em"]) result["avg_f1"] = np.nanmean(self.eval_data["f1"]) result["avg_acc"] = np.nanmean(self.eval_data["acc"]) result["avg_wall_time"] = np.nanmean(self.eval_data["wall_time"]) result["avg_total_tokens"] = np.nanmean(self.eval_data["total_tokens"]) result["avg_total_cost"] = np.nanmean(self.eval_data["total_cost"]) result["avg_steps"] = 
np.nanmean(self.eval_data["steps"]) result["avg_token_cost"] = np.nanmean(self.eval_data["token_cost"]) result["avg_tool_cost"] = np.nanmean(self.eval_data["tool_cost"]) return result def get_metrics(self, query, label, pred): if pred is None: return {'em': 0, 'f1': 0} norm_label = normalize_answer(label) norm_pred = normalize_answer(pred) em = (norm_pred == norm_label) f1 = f1_score(norm_pred, norm_label) acc = llm_accuracy_score(query, pred, label) return {'em': em, 'f1': f1, 'acc': acc} def _parse_prediction(self, output): if isinstance(self.algo, IO): return str(output).strip("\n") elif isinstance(self.algo, CoT): return str(output).split("\n")[-1].replace("Answer:", "") elif isinstance(self.algo, ReactBase): return str(output).strip("\n") elif isinstance(self.algo, PWS_Base): return str(output).strip("\n") elif isinstance(self.algo, PWS_Extra): return str(output).strip("\n") def _failed_response(self): resposne = {} for key in ["input", "output", "wall_time", "total_tokens", "total_cost", "steps", "token_cost", "tool_cost"]: resposne[key] = np.nan return resposne
[ "langchain.llms.OpenAI" ]
[((432, 469), 're.sub', 're.sub', (['"""\\\\b(a|an|the)\\\\b"""', '""" """', 'text'], {}), "('\\\\b(a|an|the)\\\\b', ' ', text)\n", (438, 469), False, 'import re\n'), ((1337, 1363), 'collections.Counter', 'Counter', (['prediction_tokens'], {}), '(prediction_tokens)\n', (1344, 1363), False, 'from collections import Counter\n'), ((1366, 1394), 'collections.Counter', 'Counter', (['ground_truth_tokens'], {}), '(ground_truth_tokens)\n', (1373, 1394), False, 'from collections import Counter\n'), ((1933, 1954), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (1939, 1954), False, 'from langchain.llms import OpenAI\n'), ((7211, 7243), 'numpy.nanmean', 'np.nanmean', (["self.eval_data['em']"], {}), "(self.eval_data['em'])\n", (7221, 7243), True, 'import numpy as np\n'), ((7271, 7303), 'numpy.nanmean', 'np.nanmean', (["self.eval_data['f1']"], {}), "(self.eval_data['f1'])\n", (7281, 7303), True, 'import numpy as np\n'), ((7332, 7365), 'numpy.nanmean', 'np.nanmean', (["self.eval_data['acc']"], {}), "(self.eval_data['acc'])\n", (7342, 7365), True, 'import numpy as np\n'), ((7400, 7439), 'numpy.nanmean', 'np.nanmean', (["self.eval_data['wall_time']"], {}), "(self.eval_data['wall_time'])\n", (7410, 7439), True, 'import numpy as np\n'), ((7477, 7519), 'numpy.nanmean', 'np.nanmean', (["self.eval_data['total_tokens']"], {}), "(self.eval_data['total_tokens'])\n", (7487, 7519), True, 'import numpy as np\n'), ((7555, 7595), 'numpy.nanmean', 'np.nanmean', (["self.eval_data['total_cost']"], {}), "(self.eval_data['total_cost'])\n", (7565, 7595), True, 'import numpy as np\n'), ((7626, 7661), 'numpy.nanmean', 'np.nanmean', (["self.eval_data['steps']"], {}), "(self.eval_data['steps'])\n", (7636, 7661), True, 'import numpy as np\n'), ((7697, 7737), 'numpy.nanmean', 'np.nanmean', (["self.eval_data['token_cost']"], {}), "(self.eval_data['token_cost'])\n", (7707, 7737), True, 'import numpy as np\n'), ((7772, 7811), 'numpy.nanmean', 'np.nanmean', 
(["self.eval_data['tool_cost']"], {}), "(self.eval_data['tool_cost'])\n", (7782, 7811), True, 'import numpy as np\n'), ((3209, 3230), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3228, 3230), False, 'import traceback\n')]
import json from langchain.schema.messages import SystemMessage from langchain.output_parsers.json import parse_partial_json from creator.code_interpreter import CodeInterpreter, language_map from creator.config.library import config from creator.utils import load_system_prompt, remove_tips from creator.llm.llm_creator import create_llm from .base import BaseAgent DEBUGGING_TIPS = load_system_prompt(config.tips_for_testing_prompt_path) VERIFY_TIPS = load_system_prompt(config.tips_for_veryfy_prompt_path) class CodeTesterAgent(BaseAgent): total_tries: int = 10 output_key: str = "output" allow_user_confirm: bool = config.run_human_confirm @property def _chain_type(self): return "CodeTesterAgent" def postprocess_mesasge(self, message): function_call = message.additional_kwargs.get("function_call", None) if function_call is not None: name = function_call.get("name", "run_code") arguments = function_call.get("arguments", "{}") arguments_json = parse_partial_json(arguments) if name not in ("run_code", "test_summary") or not arguments_json: language = name if name in language_map else "python" function_call = { "name": "run_code", "arguments": json.dumps({"language": language, "code": arguments}, ensure_ascii=False) } message.additional_kwargs["function_call"] = function_call return message def messages_hot_fix(self, langchain_messages): langchain_messages = remove_tips(langchain_messages) tool_result = langchain_messages[-1].content tool_result = parse_partial_json(tool_result) if len(tool_result.get("stderr", "")) > 0 and "error" in tool_result["stderr"].lower(): # add tips for debugging langchain_messages.append(SystemMessage(content=DEBUGGING_TIPS)) else: langchain_messages.append(SystemMessage(content=VERIFY_TIPS)) return langchain_messages def parse_output(self, messages): function_call = messages[-1].get("function_call", None) test_summary = None if function_call is not None: function_name = function_call.get("name", "") arguments = 
parse_partial_json(function_call.get("arguments", "{}")) if function_name == "test_summary": test_summary = arguments.get("test_cases", []) messages = messages[:-1] return { "output":{ "messages": messages, "test_summary": test_summary, } } def create_code_tester_agent(llm): template = load_system_prompt(config.tester_agent_prompt_path) tool = CodeInterpreter() code_interpreter_function_schema = tool.to_function_schema() with open(config.testsummary_function_schema_path, encoding="utf-8") as f: test_summary_function_schema = json.load(f) chain = CodeTesterAgent( llm=llm, system_template=template, function_schemas=[code_interpreter_function_schema, test_summary_function_schema], tools=[tool], verbose=False, ) return chain llm = create_llm(config) code_tester_agent = create_code_tester_agent(llm=llm)
[ "langchain.output_parsers.json.parse_partial_json", "langchain.schema.messages.SystemMessage" ]
[((389, 444), 'creator.utils.load_system_prompt', 'load_system_prompt', (['config.tips_for_testing_prompt_path'], {}), '(config.tips_for_testing_prompt_path)\n', (407, 444), False, 'from creator.utils import load_system_prompt, remove_tips\n'), ((459, 513), 'creator.utils.load_system_prompt', 'load_system_prompt', (['config.tips_for_veryfy_prompt_path'], {}), '(config.tips_for_veryfy_prompt_path)\n', (477, 513), False, 'from creator.utils import load_system_prompt, remove_tips\n'), ((3253, 3271), 'creator.llm.llm_creator.create_llm', 'create_llm', (['config'], {}), '(config)\n', (3263, 3271), False, 'from creator.llm.llm_creator import create_llm\n'), ((2727, 2778), 'creator.utils.load_system_prompt', 'load_system_prompt', (['config.tester_agent_prompt_path'], {}), '(config.tester_agent_prompt_path)\n', (2745, 2778), False, 'from creator.utils import load_system_prompt, remove_tips\n'), ((2790, 2807), 'creator.code_interpreter.CodeInterpreter', 'CodeInterpreter', ([], {}), '()\n', (2805, 2807), False, 'from creator.code_interpreter import CodeInterpreter, language_map\n'), ((1607, 1638), 'creator.utils.remove_tips', 'remove_tips', (['langchain_messages'], {}), '(langchain_messages)\n', (1618, 1638), False, 'from creator.utils import load_system_prompt, remove_tips\n'), ((1714, 1745), 'langchain.output_parsers.json.parse_partial_json', 'parse_partial_json', (['tool_result'], {}), '(tool_result)\n', (1732, 1745), False, 'from langchain.output_parsers.json import parse_partial_json\n'), ((2992, 3004), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3001, 3004), False, 'import json\n'), ((1045, 1074), 'langchain.output_parsers.json.parse_partial_json', 'parse_partial_json', (['arguments'], {}), '(arguments)\n', (1063, 1074), False, 'from langchain.output_parsers.json import parse_partial_json\n'), ((1906, 1943), 'langchain.schema.messages.SystemMessage', 'SystemMessage', ([], {'content': 'DEBUGGING_TIPS'}), '(content=DEBUGGING_TIPS)\n', (1919, 1943), False, 'from 
langchain.schema.messages import SystemMessage\n'), ((1997, 2031), 'langchain.schema.messages.SystemMessage', 'SystemMessage', ([], {'content': 'VERIFY_TIPS'}), '(content=VERIFY_TIPS)\n', (2010, 2031), False, 'from langchain.schema.messages import SystemMessage\n'), ((1331, 1404), 'json.dumps', 'json.dumps', (["{'language': language, 'code': arguments}"], {'ensure_ascii': '(False)'}), "({'language': language, 'code': arguments}, ensure_ascii=False)\n", (1341, 1404), False, 'import json\n')]
import requests from typing import Any, Dict, Optional from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT from langchain.chains import APIChain from langchain.prompts import BasePromptTemplate from langchain.base_language import BaseLanguageModel from langchain.chains.llm import LLMChain from .requests_l402 import RequestsL402Wrapper from .requests_l402 import ResponseTextWrapper from lightning import LightningNode class L402APIChain(APIChain): requests_wrapper: Any @classmethod def from_llm_and_api_docs( cls, llm: BaseLanguageModel, api_docs: str, headers: Optional[dict] = None, api_url_prompt: BasePromptTemplate = API_URL_PROMPT, api_response_prompt: BasePromptTemplate = API_RESPONSE_PROMPT, lightning_node = None, **kwargs: Any, ) -> APIChain: """Load chain from just an LLM and the api docs.""" requests_L402 = RequestsL402Wrapper(lightning_node, requests) lang_chain_request_L402 = ResponseTextWrapper( requests_wrapper=requests_L402, ) get_request_chain = LLMChain(llm=llm, prompt=api_url_prompt) get_answer_chain = LLMChain(llm=llm, prompt=api_response_prompt) return cls( api_request_chain=get_request_chain, api_answer_chain=get_answer_chain, requests_wrapper=lang_chain_request_L402, api_docs=api_docs, **kwargs, )
[ "langchain.chains.llm.LLMChain" ]
[((1139, 1179), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'api_url_prompt'}), '(llm=llm, prompt=api_url_prompt)\n', (1147, 1179), False, 'from langchain.chains.llm import LLMChain\n'), ((1207, 1252), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'api_response_prompt'}), '(llm=llm, prompt=api_response_prompt)\n', (1215, 1252), False, 'from langchain.chains.llm import LLMChain\n')]
from langchain.agents.tools import Tool from langchain.chains import LLMMathChain from langchain.chat_models import ChatOpenAI from langchain.llms import OpenAI from langchain_experimental.plan_and_execute import ( PlanAndExecute, load_agent_executor, load_chat_planner, ) llm = OpenAI(temperature=0) llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True) tools = [ Tool( name="Calculator", func=llm_math_chain.run, description="useful for when you need to answer questions about math", ), ] model = ChatOpenAI(temperature=0) planner = load_chat_planner(model) executor = load_agent_executor(model, tools, verbose=True) agent = PlanAndExecute(planner=planner, executor=executor, verbose=True) if __name__ == "__main__": agent.run( "Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?" )
[ "langchain_experimental.plan_and_execute.load_chat_planner", "langchain.chat_models.ChatOpenAI", "langchain.llms.OpenAI", "langchain.chains.LLMMathChain.from_llm", "langchain.agents.tools.Tool", "langchain_experimental.plan_and_execute.load_agent_executor", "langchain_experimental.plan_and_execute.PlanAndExecute" ]
[((292, 313), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (298, 313), False, 'from langchain.llms import OpenAI\n'), ((331, 375), 'langchain.chains.LLMMathChain.from_llm', 'LLMMathChain.from_llm', ([], {'llm': 'llm', 'verbose': '(True)'}), '(llm=llm, verbose=True)\n', (352, 375), False, 'from langchain.chains import LLMMathChain\n'), ((553, 578), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (563, 578), False, 'from langchain.chat_models import ChatOpenAI\n'), ((590, 614), 'langchain_experimental.plan_and_execute.load_chat_planner', 'load_chat_planner', (['model'], {}), '(model)\n', (607, 614), False, 'from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner\n'), ((626, 673), 'langchain_experimental.plan_and_execute.load_agent_executor', 'load_agent_executor', (['model', 'tools'], {'verbose': '(True)'}), '(model, tools, verbose=True)\n', (645, 673), False, 'from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner\n'), ((682, 746), 'langchain_experimental.plan_and_execute.PlanAndExecute', 'PlanAndExecute', ([], {'planner': 'planner', 'executor': 'executor', 'verbose': '(True)'}), '(planner=planner, executor=executor, verbose=True)\n', (696, 746), False, 'from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner\n'), ((390, 514), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Calculator"""', 'func': 'llm_math_chain.run', 'description': '"""useful for when you need to answer questions about math"""'}), "(name='Calculator', func=llm_math_chain.run, description=\n 'useful for when you need to answer questions about math')\n", (394, 514), False, 'from langchain.agents.tools import Tool\n')]
"""Functionality for loading chains.""" import json from pathlib import Path from typing import Any, Union import yaml from langchain.chains.api.base import APIChain from langchain.chains.base import Chain from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain from langchain.chains.combine_documents.refine import RefineDocumentsChain from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.hyde.base import HypotheticalDocumentEmbedder from langchain.chains.llm import LLMChain from langchain.chains.llm_bash.base import LLMBashChain from langchain.chains.llm_checker.base import LLMCheckerChain from langchain.chains.llm_math.base import LLMMathChain from langchain.chains.llm_requests import LLMRequestsChain from langchain.chains.pal.base import PALChain from langchain.chains.qa_with_sources.base import QAWithSourcesChain from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain from langchain.chains.sql_database.base import SQLDatabaseChain from langchain.chains.vector_db_qa.base import VectorDBQA from langchain.llms.loading import load_llm, load_llm_from_config from langchain.prompts.loading import load_prompt, load_prompt_from_config from langchain.utilities.loading import try_load_from_hub URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/" def _load_llm_chain(config: dict, **kwargs: Any) -> LLMChain: """Load LLM chain from config dict.""" if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) else: raise 
ValueError("One of `prompt` or `prompt_path` must be present.") return LLMChain(llm=llm, prompt=prompt, **config) def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedder: """Load hypothetical document embedder chain from config dict.""" if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") if "embeddings" in kwargs: embeddings = kwargs.pop("embeddings") else: raise ValueError("`embeddings` must be present.") return HypotheticalDocumentEmbedder( llm_chain=llm_chain, base_embeddings=embeddings, **config ) def _load_stuff_documents_chain(config: dict, **kwargs: Any) -> StuffDocumentsChain: if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.") if not isinstance(llm_chain, LLMChain): raise ValueError(f"Expected LLMChain, got {llm_chain}") if "document_prompt" in config: prompt_config = config.pop("document_prompt") document_prompt = load_prompt_from_config(prompt_config) elif "document_prompt_path" in config: document_prompt = load_prompt(config.pop("document_prompt_path")) else: raise ValueError( "One of `document_prompt` or `document_prompt_path` must be present." 
) return StuffDocumentsChain( llm_chain=llm_chain, document_prompt=document_prompt, **config ) def _load_map_reduce_documents_chain( config: dict, **kwargs: Any ) -> MapReduceDocumentsChain: if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.") if not isinstance(llm_chain, LLMChain): raise ValueError(f"Expected LLMChain, got {llm_chain}") if "combine_document_chain" in config: combine_document_chain_config = config.pop("combine_document_chain") combine_document_chain = load_chain_from_config(combine_document_chain_config) elif "combine_document_chain_path" in config: combine_document_chain = load_chain(config.pop("combine_document_chain_path")) else: raise ValueError( "One of `combine_document_chain` or " "`combine_document_chain_path` must be present." ) if "collapse_document_chain" in config: collapse_document_chain_config = config.pop("collapse_document_chain") if collapse_document_chain_config is None: collapse_document_chain = None else: collapse_document_chain = load_chain_from_config( collapse_document_chain_config ) elif "collapse_document_chain_path" in config: collapse_document_chain = load_chain(config.pop("collapse_document_chain_path")) return MapReduceDocumentsChain( llm_chain=llm_chain, combine_document_chain=combine_document_chain, collapse_document_chain=collapse_document_chain, **config, ) def _load_llm_bash_chain(config: dict, **kwargs: Any) -> LLMBashChain: if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in 
config: prompt = load_prompt(config.pop("prompt_path")) return LLMBashChain(llm=llm, prompt=prompt, **config) def _load_llm_checker_chain(config: dict, **kwargs: Any) -> LLMCheckerChain: if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "create_draft_answer_prompt" in config: create_draft_answer_prompt_config = config.pop("create_draft_answer_prompt") create_draft_answer_prompt = load_prompt_from_config( create_draft_answer_prompt_config ) elif "create_draft_answer_prompt_path" in config: create_draft_answer_prompt = load_prompt( config.pop("create_draft_answer_prompt_path") ) if "list_assertions_prompt" in config: list_assertions_prompt_config = config.pop("list_assertions_prompt") list_assertions_prompt = load_prompt_from_config(list_assertions_prompt_config) elif "list_assertions_prompt_path" in config: list_assertions_prompt = load_prompt(config.pop("list_assertions_prompt_path")) if "check_assertions_prompt" in config: check_assertions_prompt_config = config.pop("check_assertions_prompt") check_assertions_prompt = load_prompt_from_config( check_assertions_prompt_config ) elif "check_assertions_prompt_path" in config: check_assertions_prompt = load_prompt( config.pop("check_assertions_prompt_path") ) if "revised_answer_prompt" in config: revised_answer_prompt_config = config.pop("revised_answer_prompt") revised_answer_prompt = load_prompt_from_config(revised_answer_prompt_config) elif "revised_answer_prompt_path" in config: revised_answer_prompt = load_prompt(config.pop("revised_answer_prompt_path")) return LLMCheckerChain( llm=llm, create_draft_answer_prompt=create_draft_answer_prompt, list_assertions_prompt=list_assertions_prompt, check_assertions_prompt=check_assertions_prompt, revised_answer_prompt=revised_answer_prompt, **config, ) def _load_llm_math_chain(config: dict, **kwargs: 
Any) -> LLMMathChain: if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) return LLMMathChain(llm=llm, prompt=prompt, **config) def _load_map_rerank_documents_chain( config: dict, **kwargs: Any ) -> MapRerankDocumentsChain: if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.") return MapRerankDocumentsChain(llm_chain=llm_chain, **config) def _load_pal_chain(config: dict, **kwargs: Any) -> PALChain: if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) else: raise ValueError("One of `prompt` or `prompt_path` must be present.") return PALChain(llm=llm, prompt=prompt, **config) def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain: if "initial_llm_chain" in config: initial_llm_chain_config = config.pop("initial_llm_chain") initial_llm_chain = load_chain_from_config(initial_llm_chain_config) elif "initial_llm_chain_path" in config: initial_llm_chain = load_chain(config.pop("initial_llm_chain_path")) else: raise ValueError( "One of `initial_llm_chain` or `initial_llm_chain_config` must be present." 
) if "refine_llm_chain" in config: refine_llm_chain_config = config.pop("refine_llm_chain") refine_llm_chain = load_chain_from_config(refine_llm_chain_config) elif "refine_llm_chain_path" in config: refine_llm_chain = load_chain(config.pop("refine_llm_chain_path")) else: raise ValueError( "One of `refine_llm_chain` or `refine_llm_chain_config` must be present." ) if "document_prompt" in config: prompt_config = config.pop("document_prompt") document_prompt = load_prompt_from_config(prompt_config) elif "document_prompt_path" in config: document_prompt = load_prompt(config.pop("document_prompt_path")) return RefineDocumentsChain( initial_llm_chain=initial_llm_chain, refine_llm_chain=refine_llm_chain, document_prompt=document_prompt, **config, ) def _load_qa_with_sources_chain(config: dict, **kwargs: Any) -> QAWithSourcesChain: if "combine_documents_chain" in config: combine_documents_chain_config = config.pop("combine_documents_chain") combine_documents_chain = load_chain_from_config(combine_documents_chain_config) elif "combine_documents_chain_path" in config: combine_documents_chain = load_chain(config.pop("combine_documents_chain_path")) else: raise ValueError( "One of `combine_documents_chain` or " "`combine_documents_chain_path` must be present." 
) return QAWithSourcesChain(combine_documents_chain=combine_documents_chain, **config) def _load_sql_database_chain(config: dict, **kwargs: Any) -> SQLDatabaseChain: if "database" in kwargs: database = kwargs.pop("database") else: raise ValueError("`database` must be present.") if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) return SQLDatabaseChain(database=database, llm=llm, prompt=prompt, **config) def _load_vector_db_qa_with_sources_chain( config: dict, **kwargs: Any ) -> VectorDBQAWithSourcesChain: if "vectorstore" in kwargs: vectorstore = kwargs.pop("vectorstore") else: raise ValueError("`vectorstore` must be present.") if "combine_documents_chain" in config: combine_documents_chain_config = config.pop("combine_documents_chain") combine_documents_chain = load_chain_from_config(combine_documents_chain_config) elif "combine_documents_chain_path" in config: combine_documents_chain = load_chain(config.pop("combine_documents_chain_path")) else: raise ValueError( "One of `combine_documents_chain` or " "`combine_documents_chain_path` must be present." 
) return VectorDBQAWithSourcesChain( combine_documents_chain=combine_documents_chain, vectorstore=vectorstore, **config, ) def _load_vector_db_qa(config: dict, **kwargs: Any) -> VectorDBQA: if "vectorstore" in kwargs: vectorstore = kwargs.pop("vectorstore") else: raise ValueError("`vectorstore` must be present.") if "combine_documents_chain" in config: combine_documents_chain_config = config.pop("combine_documents_chain") combine_documents_chain = load_chain_from_config(combine_documents_chain_config) elif "combine_documents_chain_path" in config: combine_documents_chain = load_chain(config.pop("combine_documents_chain_path")) else: raise ValueError( "One of `combine_documents_chain` or " "`combine_documents_chain_path` must be present." ) return VectorDBQA( combine_documents_chain=combine_documents_chain, vectorstore=vectorstore, **config, ) def _load_api_chain(config: dict, **kwargs: Any) -> APIChain: if "api_request_chain" in config: api_request_chain_config = config.pop("api_request_chain") api_request_chain = load_chain_from_config(api_request_chain_config) elif "api_request_chain_path" in config: api_request_chain = load_chain(config.pop("api_request_chain_path")) else: raise ValueError( "One of `api_request_chain` or `api_request_chain_path` must be present." ) if "api_answer_chain" in config: api_answer_chain_config = config.pop("api_answer_chain") api_answer_chain = load_chain_from_config(api_answer_chain_config) elif "api_answer_chain_path" in config: api_answer_chain = load_chain(config.pop("api_answer_chain_path")) else: raise ValueError( "One of `api_answer_chain` or `api_answer_chain_path` must be present." 
) if "requests_wrapper" in kwargs: requests_wrapper = kwargs.pop("requests_wrapper") else: raise ValueError("`requests_wrapper` must be present.") return APIChain( api_request_chain=api_request_chain, api_answer_chain=api_answer_chain, requests_wrapper=requests_wrapper, **config, ) def _load_llm_requests_chain(config: dict, **kwargs: Any) -> LLMRequestsChain: if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") if "requests_wrapper" in kwargs: requests_wrapper = kwargs.pop("requests_wrapper") return LLMRequestsChain( llm_chain=llm_chain, requests_wrapper=requests_wrapper, **config ) else: return LLMRequestsChain(llm_chain=llm_chain, **config) type_to_loader_dict = { "api_chain": _load_api_chain, "hyde_chain": _load_hyde_chain, "llm_chain": _load_llm_chain, "llm_bash_chain": _load_llm_bash_chain, "llm_checker_chain": _load_llm_checker_chain, "llm_math_chain": _load_llm_math_chain, "llm_requests_chain": _load_llm_requests_chain, "pal_chain": _load_pal_chain, "qa_with_sources_chain": _load_qa_with_sources_chain, "stuff_documents_chain": _load_stuff_documents_chain, "map_reduce_documents_chain": _load_map_reduce_documents_chain, "map_rerank_documents_chain": _load_map_rerank_documents_chain, "refine_documents_chain": _load_refine_documents_chain, "sql_database_chain": _load_sql_database_chain, "vector_db_qa_with_sources_chain": _load_vector_db_qa_with_sources_chain, "vector_db_qa": _load_vector_db_qa, } def load_chain_from_config(config: dict, **kwargs: Any) -> Chain: """Load chain from Config Dict.""" if "_type" not in config: raise ValueError("Must specify a chain Type in config") config_type = config.pop("_type") if config_type not in type_to_loader_dict: raise ValueError(f"Loading {config_type} chain not supported") chain_loader = 
type_to_loader_dict[config_type] return chain_loader(config, **kwargs) def load_chain(path: Union[str, Path], **kwargs: Any) -> Chain: """Unified method for loading a chain from LangChainHub or local fs.""" if hub_result := try_load_from_hub( path, _load_chain_from_file, "chains", {"json", "yaml"}, **kwargs ): return hub_result else: return _load_chain_from_file(path, **kwargs) def _load_chain_from_file(file: Union[str, Path], **kwargs: Any) -> Chain: """Load chain from file.""" # Convert file to Path object. if isinstance(file, str): file_path = Path(file) else: file_path = file # Load from either json or yaml. if file_path.suffix == ".json": with open(file_path) as f: config = json.load(f) elif file_path.suffix == ".yaml": with open(file_path, "r") as f: config = yaml.safe_load(f) else: raise ValueError("File type must be json or yaml") # Override default 'verbose' and 'memory' for the chain if "verbose" in kwargs: config["verbose"] = kwargs.pop("verbose") if "memory" in kwargs: config["memory"] = kwargs.pop("memory") # Load the chain from the config now. return load_chain_from_config(config, **kwargs)
[ "langchain.chains.llm.LLMChain", "langchain.chains.qa_with_sources.base.QAWithSourcesChain", "langchain.chains.api.base.APIChain", "langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain", "langchain.chains.hyde.base.HypotheticalDocumentEmbedder", "langchain.chains.pal.base.PALChain", "langchain.utilities.loading.try_load_from_hub", "langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain", "langchain.chains.llm_requests.LLMRequestsChain", "langchain.chains.combine_documents.stuff.StuffDocumentsChain", "langchain.chains.combine_documents.refine.RefineDocumentsChain", "langchain.chains.vector_db_qa.base.VectorDBQA", "langchain.prompts.loading.load_prompt_from_config", "langchain.chains.llm_bash.base.LLMBashChain", "langchain.chains.llm_math.base.LLMMathChain", "langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain", "langchain.chains.sql_database.base.SQLDatabaseChain", "langchain.llms.loading.load_llm_from_config", "langchain.chains.llm_checker.base.LLMCheckerChain" ]
[((2165, 2207), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (2173, 2207), False, 'from langchain.chains.llm import LLMChain\n'), ((2853, 2945), 'langchain.chains.hyde.base.HypotheticalDocumentEmbedder', 'HypotheticalDocumentEmbedder', ([], {'llm_chain': 'llm_chain', 'base_embeddings': 'embeddings'}), '(llm_chain=llm_chain, base_embeddings=\n embeddings, **config)\n', (2881, 2945), False, 'from langchain.chains.hyde.base import HypotheticalDocumentEmbedder\n'), ((3900, 3987), 'langchain.chains.combine_documents.stuff.StuffDocumentsChain', 'StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'document_prompt': 'document_prompt'}), '(llm_chain=llm_chain, document_prompt=document_prompt,\n **config)\n', (3919, 3987), False, 'from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n'), ((5552, 5711), 'langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain', 'MapReduceDocumentsChain', ([], {'llm_chain': 'llm_chain', 'combine_document_chain': 'combine_document_chain', 'collapse_document_chain': 'collapse_document_chain'}), '(llm_chain=llm_chain, combine_document_chain=\n combine_document_chain, collapse_document_chain=collapse_document_chain,\n **config)\n', (5575, 5711), False, 'from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain\n'), ((6314, 6360), 'langchain.chains.llm_bash.base.LLMBashChain', 'LLMBashChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (6326, 6360), False, 'from langchain.chains.llm_bash.base import LLMBashChain\n'), ((8225, 8469), 'langchain.chains.llm_checker.base.LLMCheckerChain', 'LLMCheckerChain', ([], {'llm': 'llm', 'create_draft_answer_prompt': 'create_draft_answer_prompt', 'list_assertions_prompt': 'list_assertions_prompt', 'check_assertions_prompt': 'check_assertions_prompt', 'revised_answer_prompt': 'revised_answer_prompt'}), '(llm=llm, 
create_draft_answer_prompt=\n create_draft_answer_prompt, list_assertions_prompt=\n list_assertions_prompt, check_assertions_prompt=check_assertions_prompt,\n revised_answer_prompt=revised_answer_prompt, **config)\n', (8240, 8469), False, 'from langchain.chains.llm_checker.base import LLMCheckerChain\n'), ((9083, 9129), 'langchain.chains.llm_math.base.LLMMathChain', 'LLMMathChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (9095, 9129), False, 'from langchain.chains.llm_math.base import LLMMathChain\n'), ((9579, 9633), 'langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain', 'MapRerankDocumentsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (9602, 9633), False, 'from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain\n'), ((10285, 10327), 'langchain.chains.pal.base.PALChain', 'PALChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (10293, 10327), False, 'from langchain.chains.pal.base import PALChain\n'), ((11566, 11706), 'langchain.chains.combine_documents.refine.RefineDocumentsChain', 'RefineDocumentsChain', ([], {'initial_llm_chain': 'initial_llm_chain', 'refine_llm_chain': 'refine_llm_chain', 'document_prompt': 'document_prompt'}), '(initial_llm_chain=initial_llm_chain, refine_llm_chain=\n refine_llm_chain, document_prompt=document_prompt, **config)\n', (11586, 11706), False, 'from langchain.chains.combine_documents.refine import RefineDocumentsChain\n'), ((12349, 12426), 'langchain.chains.qa_with_sources.base.QAWithSourcesChain', 'QAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain'}), '(combine_documents_chain=combine_documents_chain, **config)\n', (12367, 12426), False, 'from langchain.chains.qa_with_sources.base import QAWithSourcesChain\n'), ((13054, 13123), 'langchain.chains.sql_database.base.SQLDatabaseChain', 'SQLDatabaseChain', ([], {'database': 'database', 'llm': 'llm', 
'prompt': 'prompt'}), '(database=database, llm=llm, prompt=prompt, **config)\n', (13070, 13123), False, 'from langchain.chains.sql_database.base import SQLDatabaseChain\n'), ((13905, 14019), 'langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain', 'VectorDBQAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain,\n vectorstore=vectorstore, **config)\n', (13931, 14019), False, 'from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain\n'), ((14787, 14886), 'langchain.chains.vector_db_qa.base.VectorDBQA', 'VectorDBQA', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain, vectorstore=\n vectorstore, **config)\n', (14797, 14886), False, 'from langchain.chains.vector_db_qa.base import VectorDBQA\n'), ((16019, 16149), 'langchain.chains.api.base.APIChain', 'APIChain', ([], {'api_request_chain': 'api_request_chain', 'api_answer_chain': 'api_answer_chain', 'requests_wrapper': 'requests_wrapper'}), '(api_request_chain=api_request_chain, api_answer_chain=\n api_answer_chain, requests_wrapper=requests_wrapper, **config)\n', (16027, 16149), False, 'from langchain.chains.api.base import APIChain\n'), ((1653, 1685), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (1673, 1685), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((1936, 1974), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (1959, 1974), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((3604, 3642), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (3627, 3642), False, 'from langchain.prompts.loading 
import load_prompt, load_prompt_from_config\n'), ((5892, 5924), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (5912, 5924), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6174, 6212), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (6197, 6212), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((6517, 6549), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (6537, 6549), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6879, 6937), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['create_draft_answer_prompt_config'], {}), '(create_draft_answer_prompt_config)\n', (6902, 6937), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7285, 7339), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['list_assertions_prompt_config'], {}), '(list_assertions_prompt_config)\n', (7308, 7339), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7635, 7690), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['check_assertions_prompt_config'], {}), '(check_assertions_prompt_config)\n', (7658, 7690), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8025, 8078), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['revised_answer_prompt_config'], {}), '(revised_answer_prompt_config)\n', (8048, 8078), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8661, 8693), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (8681, 8693), False, 'from 
langchain.llms.loading import load_llm, load_llm_from_config\n'), ((8943, 8981), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (8966, 8981), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((9775, 9807), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (9795, 9807), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((10057, 10095), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (10080, 10095), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((11399, 11437), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (11422, 11437), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((12722, 12754), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (12742, 12754), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((13004, 13042), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (13027, 13042), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((16709, 16796), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain', 'requests_wrapper': 'requests_wrapper'}), '(llm_chain=llm_chain, requests_wrapper=requests_wrapper, **\n config)\n', (16725, 16796), False, 'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((16839, 16886), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (16855, 16886), False, 
'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((18341, 18429), 'langchain.utilities.loading.try_load_from_hub', 'try_load_from_hub', (['path', '_load_chain_from_file', '"""chains"""', "{'json', 'yaml'}"], {}), "(path, _load_chain_from_file, 'chains', {'json', 'yaml'},\n **kwargs)\n", (18358, 18429), False, 'from langchain.utilities.loading import try_load_from_hub\n'), ((18724, 18734), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (18728, 18734), False, 'from pathlib import Path\n'), ((18899, 18911), 'json.load', 'json.load', (['f'], {}), '(f)\n', (18908, 18911), False, 'import json\n'), ((19011, 19028), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (19025, 19028), False, 'import yaml\n')]
"""Wrapper around HuggingFace APIs.""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env VALID_TASKS = ("text2text-generation", "text-generation", "summarization") class HuggingFaceEndpoint(LLM): """Wrapper around HuggingFaceHub Inference Endpoints. To use, you should have the ``huggingface_hub`` python package installed, and the environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. Only supports `text-generation` and `text2text-generation` for now. Example: .. code-block:: python from langchain.llms import HuggingFaceEndpoint endpoint_url = ( "https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud" ) hf = HuggingFaceEndpoint( endpoint_url=endpoint_url, huggingfacehub_api_token="my-api-key" ) """ endpoint_url: str = "" """Endpoint URL to use.""" task: Optional[str] = None """Task to call the model with. Should be a task that returns `generated_text` or `summary_text`.""" model_kwargs: Optional[dict] = None """Key word arguments to pass to the model.""" huggingfacehub_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" huggingfacehub_api_token = get_from_dict_or_env( values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN" ) try: from huggingface_hub.hf_api import HfApi try: HfApi( endpoint="https://huggingface.co", # Can be a Private Hub endpoint. token=huggingfacehub_api_token, ).whoami() except Exception as e: raise ValueError( "Could not authenticate with huggingface_hub. " "Please check your API token." 
) from e except ImportError: raise ValueError( "Could not import huggingface_hub python package. " "Please install it with `pip install huggingface_hub`." ) values["huggingfacehub_api_token"] = huggingfacehub_api_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"endpoint_url": self.endpoint_url, "task": self.task}, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "huggingface_endpoint" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to HuggingFace Hub's inference endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = hf("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} # payload samples parameter_payload = {"inputs": prompt, "parameters": _model_kwargs} # HTTP headers for authorization headers = { "Authorization": f"Bearer {self.huggingfacehub_api_token}", "Content-Type": "application/json", } # send request try: response = requests.post( self.endpoint_url, headers=headers, json=parameter_payload ) except requests.exceptions.RequestException as e: # This is the correct syntax raise ValueError(f"Error raised by inference endpoint: {e}") generated_text = response.json() if "error" in generated_text: raise ValueError( f"Error raised by inference API: {generated_text['error']}" ) if self.task == "text-generation": # Text generation return includes the starter text. 
text = generated_text[0]["generated_text"][len(prompt) :] elif self.task == "text2text-generation": text = generated_text[0]["generated_text"] elif self.task == "summarization": text = generated_text[0]["summary_text"] else: raise ValueError( f"Got invalid task {self.task}, " f"currently only {VALID_TASKS} are supported" ) if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. text = enforce_stop_tokens(text, stop) return text
[ "langchain.llms.utils.enforce_stop_tokens", "langchain.utils.get_from_dict_or_env" ]
[((1661, 1677), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1675, 1677), False, 'from pydantic import Extra, root_validator\n'), ((1848, 1936), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""huggingfacehub_api_token"""', '"""HUGGINGFACEHUB_API_TOKEN"""'], {}), "(values, 'huggingfacehub_api_token',\n 'HUGGINGFACEHUB_API_TOKEN')\n", (1868, 1936), False, 'from langchain.utils import get_from_dict_or_env\n'), ((4131, 4204), 'requests.post', 'requests.post', (['self.endpoint_url'], {'headers': 'headers', 'json': 'parameter_payload'}), '(self.endpoint_url, headers=headers, json=parameter_payload)\n', (4144, 4204), False, 'import requests\n'), ((5337, 5368), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (5356, 5368), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((2055, 2127), 'huggingface_hub.hf_api.HfApi', 'HfApi', ([], {'endpoint': '"""https://huggingface.co"""', 'token': 'huggingfacehub_api_token'}), "(endpoint='https://huggingface.co', token=huggingfacehub_api_token)\n", (2060, 2127), False, 'from huggingface_hub.hf_api import HfApi\n')]
"""Clear Weaviate index.""" import logging import os import weaviate from langchain.embeddings import OpenAIEmbeddings from langchain.indexes import SQLRecordManager, index from langchain.vectorstores import Weaviate logger = logging.getLogger(__name__) WEAVIATE_URL = os.environ["WEAVIATE_URL"] WEAVIATE_API_KEY = os.environ["WEAVIATE_API_KEY"] RECORD_MANAGER_DB_URL = os.environ["RECORD_MANAGER_DB_URL"] WEAVIATE_DOCS_INDEX_NAME = "LangChain_Combined_Docs_OpenAI_text_embedding_3_small" def clear(): client = weaviate.Client( url=WEAVIATE_URL, auth_client_secret=weaviate.AuthApiKey(api_key=WEAVIATE_API_KEY), ) vectorstore = Weaviate( client=client, index_name=WEAVIATE_DOCS_INDEX_NAME, text_key="text", embedding=OpenAIEmbeddings(), by_text=False, attributes=["source", "title"], ) record_manager = SQLRecordManager( f"weaviate/{WEAVIATE_DOCS_INDEX_NAME}", db_url=RECORD_MANAGER_DB_URL ) record_manager.create_schema() indexing_stats = index( [], record_manager, vectorstore, cleanup="full", source_id_key="source", ) logger.info("Indexing stats: ", indexing_stats) logger.info( "LangChain now has this many vectors: ", client.query.aggregate(WEAVIATE_DOCS_INDEX_NAME).with_meta_count().do(), ) if __name__ == "__main__": clear()
[ "langchain.embeddings.OpenAIEmbeddings", "langchain.indexes.SQLRecordManager", "langchain.indexes.index" ]
[((228, 255), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (245, 255), False, 'import logging\n'), ((893, 984), 'langchain.indexes.SQLRecordManager', 'SQLRecordManager', (['f"""weaviate/{WEAVIATE_DOCS_INDEX_NAME}"""'], {'db_url': 'RECORD_MANAGER_DB_URL'}), "(f'weaviate/{WEAVIATE_DOCS_INDEX_NAME}', db_url=\n RECORD_MANAGER_DB_URL)\n", (909, 984), False, 'from langchain.indexes import SQLRecordManager, index\n'), ((1051, 1129), 'langchain.indexes.index', 'index', (['[]', 'record_manager', 'vectorstore'], {'cleanup': '"""full"""', 'source_id_key': '"""source"""'}), "([], record_manager, vectorstore, cleanup='full', source_id_key='source')\n", (1056, 1129), False, 'from langchain.indexes import SQLRecordManager, index\n'), ((590, 635), 'weaviate.AuthApiKey', 'weaviate.AuthApiKey', ([], {'api_key': 'WEAVIATE_API_KEY'}), '(api_key=WEAVIATE_API_KEY)\n', (609, 635), False, 'import weaviate\n'), ((782, 800), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (798, 800), False, 'from langchain.embeddings import OpenAIEmbeddings\n')]
import logging logging.basicConfig(level=logging.CRITICAL) import os from pathlib import Path import openai from dotenv import load_dotenv from langchain.chat_models import ChatOpenAI from llama_index import ( GPTVectorStoreIndex, LLMPredictor, ServiceContext, StorageContext, download_loader, load_index_from_storage, ) from utils import CACHE, FILES, models, cls, handle_save, handle_exit, initialize, select_file load_dotenv() openai.api_key = os.environ["OPENAI_API_KEY"] history = [] llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.618, model_name=models["gpt-3"], max_tokens=256)) service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, chunk_size_limit=1024) def make_index(file): cls() print("👀 Loading...") PDFReader = download_loader("PDFReader") loader = PDFReader() documents = loader.load_data(file=Path(FILES) / file) if os.path.exists(Path(CACHE) / file): print("📚 Index found in cache") return else: print("📚 Index not found in cache, creating it...") index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context) index.storage_context.persist(persist_dir=Path(CACHE) / file) def chat(file_name, index): while True: prompt = input("\n😎 Prompt: ") if prompt == "exit": handle_exit() elif prompt == "save": handle_save(str(file_name), history) query_engine = index.as_query_engine(response_mode="compact") response = query_engine.query(prompt) print("\n👻 Response: " + str(response)) history.append({"user": prompt, "response": str(response)}) def ask(file_name): try: print("👀 Loading...") storage_context = StorageContext.from_defaults(persist_dir=Path(CACHE) / file_name) index = load_index_from_storage(storage_context, service_context=service_context) cls() print("✅ Ready! 
Let's start the conversation") print("ℹ️ Press Ctrl+C to exit") chat(file_name, index) except KeyboardInterrupt: handle_exit() if __name__ == "__main__": initialize() file = select_file() if file: file_name = Path(file).name make_index(file_name) ask(file_name) else: print("No files found") handle_exit()
[ "langchain.chat_models.ChatOpenAI" ]
[((16, 59), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.CRITICAL'}), '(level=logging.CRITICAL)\n', (35, 59), False, 'import logging\n'), ((444, 457), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (455, 457), False, 'from dotenv import load_dotenv\n'), ((644, 729), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'chunk_size_limit': '(1024)'}), '(llm_predictor=llm_predictor, chunk_size_limit=1024\n )\n', (672, 729), False, 'from llama_index import GPTVectorStoreIndex, LLMPredictor, ServiceContext, StorageContext, download_loader, load_index_from_storage\n'), ((753, 758), 'utils.cls', 'cls', ([], {}), '()\n', (756, 758), False, 'from utils import CACHE, FILES, models, cls, handle_save, handle_exit, initialize, select_file\n'), ((802, 830), 'llama_index.download_loader', 'download_loader', (['"""PDFReader"""'], {}), "('PDFReader')\n", (817, 830), False, 'from llama_index import GPTVectorStoreIndex, LLMPredictor, ServiceContext, StorageContext, download_loader, load_index_from_storage\n'), ((2171, 2183), 'utils.initialize', 'initialize', ([], {}), '()\n', (2181, 2183), False, 'from utils import CACHE, FILES, models, cls, handle_save, handle_exit, initialize, select_file\n'), ((2195, 2208), 'utils.select_file', 'select_file', ([], {}), '()\n', (2206, 2208), False, 'from utils import CACHE, FILES, models, cls, handle_save, handle_exit, initialize, select_file\n'), ((551, 624), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.618)', 'model_name': "models['gpt-3']", 'max_tokens': '(256)'}), "(temperature=0.618, model_name=models['gpt-3'], max_tokens=256)\n", (561, 624), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1099, 1177), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (1133, 1177), 
False, 'from llama_index import GPTVectorStoreIndex, LLMPredictor, ServiceContext, StorageContext, download_loader, load_index_from_storage\n'), ((1871, 1944), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=service_context)\n', (1894, 1944), False, 'from llama_index import GPTVectorStoreIndex, LLMPredictor, ServiceContext, StorageContext, download_loader, load_index_from_storage\n'), ((1953, 1958), 'utils.cls', 'cls', ([], {}), '()\n', (1956, 1958), False, 'from utils import CACHE, FILES, models, cls, handle_save, handle_exit, initialize, select_file\n'), ((2361, 2374), 'utils.handle_exit', 'handle_exit', ([], {}), '()\n', (2372, 2374), False, 'from utils import CACHE, FILES, models, cls, handle_save, handle_exit, initialize, select_file\n'), ((937, 948), 'pathlib.Path', 'Path', (['CACHE'], {}), '(CACHE)\n', (941, 948), False, 'from pathlib import Path\n'), ((1374, 1387), 'utils.handle_exit', 'handle_exit', ([], {}), '()\n', (1385, 1387), False, 'from utils import CACHE, FILES, models, cls, handle_save, handle_exit, initialize, select_file\n'), ((2124, 2137), 'utils.handle_exit', 'handle_exit', ([], {}), '()\n', (2135, 2137), False, 'from utils import CACHE, FILES, models, cls, handle_save, handle_exit, initialize, select_file\n'), ((2242, 2252), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (2246, 2252), False, 'from pathlib import Path\n'), ((894, 905), 'pathlib.Path', 'Path', (['FILES'], {}), '(FILES)\n', (898, 905), False, 'from pathlib import Path\n'), ((1228, 1239), 'pathlib.Path', 'Path', (['CACHE'], {}), '(CACHE)\n', (1232, 1239), False, 'from pathlib import Path\n'), ((1830, 1841), 'pathlib.Path', 'Path', (['CACHE'], {}), '(CACHE)\n', (1834, 1841), False, 'from pathlib import Path\n')]
"""This is the logic for ingesting PDF and DOCX files into LangChain.""" import os from pathlib import Path from langchain.text_splitter import RecursiveCharacterTextSplitter from pdfminer.high_level import extract_text from langchain.embeddings import OpenAIEmbeddings from langchain.vectorstores import FAISS from dotenv import load_dotenv import faiss, pickle, docx2txt load_dotenv() OPENAI_API_TOKEN = os.getenv('OPENAI_API_TOKEN') # Here we extract the text from your pdf files. files = list(Path("docs/").glob("**/*.pdf")) count = 0 for file in files: count += 1 filename = "docs/" + "pdf" + str(count) + ".txt" text = extract_text(file) with open(filename, 'w') as f: f.write(text) # Here we extract the text from your docx files. files = list(Path("docs/").glob("**/*.docx")) count = 0 for file in files: count += 1 filename = "docs/" + "docx" + str(count) + ".txt" text = docx2txt.process(file) with open(filename, 'w') as f: f.write(text) # Here we load in the data from the text files created above. ps = list(Path("docs/").glob("**/*.txt")) data = [] sources = [] for p in ps: with open(p) as f: data.append(f.read()) sources.append(p) # Here we split the documents, as needed, into smaller chunks. # We do this due to the context limits of the LLMs. text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=500, length_function=len) docs = [] metadatas = [] for i, d in enumerate(data): splits = text_splitter.split_text(d) docs.extend(splits) metadatas.extend([{"source": sources[i]}] * len(splits)) # Here we create a vector store from the documents and save it to disk. store = FAISS.from_texts(docs, OpenAIEmbeddings(), metadatas=metadatas) faiss.write_index(store.index, "stax_docs.index") store.index = None with open("stax_faiss_store.pkl", "wb") as f: pickle.dump(store, f)
[ "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.embeddings.OpenAIEmbeddings" ]
[((374, 387), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (385, 387), False, 'from dotenv import load_dotenv\n'), ((408, 437), 'os.getenv', 'os.getenv', (['"""OPENAI_API_TOKEN"""'], {}), "('OPENAI_API_TOKEN')\n", (417, 437), False, 'import os\n'), ((1354, 1445), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1500)', 'chunk_overlap': '(500)', 'length_function': 'len'}), '(chunk_size=1500, chunk_overlap=500,\n length_function=len)\n', (1384, 1445), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1770, 1819), 'faiss.write_index', 'faiss.write_index', (['store.index', '"""stax_docs.index"""'], {}), "(store.index, 'stax_docs.index')\n", (1787, 1819), False, 'import faiss, pickle, docx2txt\n'), ((641, 659), 'pdfminer.high_level.extract_text', 'extract_text', (['file'], {}), '(file)\n', (653, 659), False, 'from pdfminer.high_level import extract_text\n'), ((924, 946), 'docx2txt.process', 'docx2txt.process', (['file'], {}), '(file)\n', (940, 946), False, 'import faiss, pickle, docx2txt\n'), ((1729, 1747), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1745, 1747), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1889, 1910), 'pickle.dump', 'pickle.dump', (['store', 'f'], {}), '(store, f)\n', (1900, 1910), False, 'import faiss, pickle, docx2txt\n'), ((501, 514), 'pathlib.Path', 'Path', (['"""docs/"""'], {}), "('docs/')\n", (505, 514), False, 'from pathlib import Path\n'), ((782, 795), 'pathlib.Path', 'Path', (['"""docs/"""'], {}), "('docs/')\n", (786, 795), False, 'from pathlib import Path\n'), ((1078, 1091), 'pathlib.Path', 'Path', (['"""docs/"""'], {}), "('docs/')\n", (1082, 1091), False, 'from pathlib import Path\n')]
import os from langchain.llms.bedrock import Bedrock from langchain import PromptTemplate def get_llm(): model_kwargs = { "maxTokenCount": 1024, "stopSequences": [], "temperature": 0, "topP": 0.9 } llm = Bedrock( # credentials_profile_name=os.environ.get("BWB_PROFILE_NAME"), #sets the profile name to use for AWS credentials (if not the default) region_name=os.environ.get("BWB_REGION_NAME"), #sets the region name (if not the default) endpoint_url=os.environ.get("BWB_ENDPOINT_URL"), #sets the endpoint URL (if necessary) model_id="amazon.titan-tg1-large", #use the Anthropic Claude model model_kwargs=model_kwargs) #configure the properties for Claude return llm def get_prompt(user_input, template): prompt_template = PromptTemplate.from_template(template) #this will automatically identify the input variables for the template prompt = prompt_template.format(user_input=user_input) return prompt def get_text_response(user_input, template): #text-to-text client function llm = get_llm() prompt = get_prompt(user_input, template) return llm.predict(prompt) #return a response to the prompt
[ "langchain.PromptTemplate.from_template" ]
[((844, 882), 'langchain.PromptTemplate.from_template', 'PromptTemplate.from_template', (['template'], {}), '(template)\n', (872, 882), False, 'from langchain import PromptTemplate\n'), ((437, 470), 'os.environ.get', 'os.environ.get', (['"""BWB_REGION_NAME"""'], {}), "('BWB_REGION_NAME')\n", (451, 470), False, 'import os\n'), ((536, 570), 'os.environ.get', 'os.environ.get', (['"""BWB_ENDPOINT_URL"""'], {}), "('BWB_ENDPOINT_URL')\n", (550, 570), False, 'import os\n')]
from langchain import PromptTemplate, LLMChain from langchain.document_loaders import TextLoader from langchain.embeddings import LlamaCppEmbeddings from langchain.llms import GPT4All from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.callbacks.base import CallbackManager from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler from langchain.vectorstores.faiss import FAISS # SCRIPT INFO: # # This script allows you to create a vectorstore from a file and query it with a question (hard coded). # # It shows how you could send questions to a GPT4All custom knowledge base and receive answers. # # If you want a chat style interface using a similar custom knowledge base, you can use the custom_chatbot.py script provided. # Setup gpt4all_path = './models/gpt4all-converted.bin' llama_path = './models/ggml-model-q4_0.bin' callback_manager = CallbackManager([StreamingStdOutCallbackHandler()]) loader = TextLoader('./docs/shortened_sotu.txt') embeddings = LlamaCppEmbeddings(model_path=llama_path) llm = GPT4All(model=gpt4all_path, callback_manager=callback_manager, verbose=True) # Split text def split_chunks(sources): chunks = [] splitter = RecursiveCharacterTextSplitter(chunk_size=256, chunk_overlap=32) for chunk in splitter.split_documents(sources): chunks.append(chunk) return chunks def create_index(chunks): texts = [doc.page_content for doc in chunks] metadatas = [doc.metadata for doc in chunks] search_index = FAISS.from_texts(texts, embeddings, metadatas=metadatas) return search_index def similarity_search(query, index): matched_docs = index.similarity_search(query, k=4) sources = [] for doc in matched_docs: sources.append( { "page_content": doc.page_content, "metadata": doc.metadata, } ) return matched_docs, sources # Create Index # docs = loader.load() # chunks = split_chunks(docs) # index = create_index(chunks) # Save Index (use this to save the index for later use) # Comment the line below after running once 
successfully (IMPORTANT) # index.save_local("state_of_the_union_index") # Load Index (use this to load the index from a file, eg on your second time running things and beyond) # Uncomment the line below after running once successfully (IMPORTANT) index = FAISS.load_local("./full_sotu_index", embeddings) # Set your query here manually question = "Summarize the comments about NATO and its purpose." matched_docs, sources = similarity_search(question, index) template = """ Please use the following context to answer questions. Context: {context} --- Question: {question} Answer: Let's think step by step.""" context = "\n".join([doc.page_content for doc in matched_docs]) prompt = PromptTemplate(template=template, input_variables=["context", "question"]).partial(context=context) llm_chain = LLMChain(prompt=prompt, llm=llm) print(llm_chain.run(question))
[ "langchain.llms.GPT4All", "langchain.PromptTemplate", "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.vectorstores.faiss.FAISS.load_local", "langchain.embeddings.LlamaCppEmbeddings", "langchain.document_loaders.TextLoader", "langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler", "langchain.LLMChain", "langchain.vectorstores.faiss.FAISS.from_texts" ]
[((968, 1007), 'langchain.document_loaders.TextLoader', 'TextLoader', (['"""./docs/shortened_sotu.txt"""'], {}), "('./docs/shortened_sotu.txt')\n", (978, 1007), False, 'from langchain.document_loaders import TextLoader\n'), ((1021, 1062), 'langchain.embeddings.LlamaCppEmbeddings', 'LlamaCppEmbeddings', ([], {'model_path': 'llama_path'}), '(model_path=llama_path)\n', (1039, 1062), False, 'from langchain.embeddings import LlamaCppEmbeddings\n'), ((1069, 1145), 'langchain.llms.GPT4All', 'GPT4All', ([], {'model': 'gpt4all_path', 'callback_manager': 'callback_manager', 'verbose': '(True)'}), '(model=gpt4all_path, callback_manager=callback_manager, verbose=True)\n', (1076, 1145), False, 'from langchain.llms import GPT4All\n'), ((2399, 2448), 'langchain.vectorstores.faiss.FAISS.load_local', 'FAISS.load_local', (['"""./full_sotu_index"""', 'embeddings'], {}), "('./full_sotu_index', embeddings)\n", (2415, 2448), False, 'from langchain.vectorstores.faiss import FAISS\n'), ((2941, 2973), 'langchain.LLMChain', 'LLMChain', ([], {'prompt': 'prompt', 'llm': 'llm'}), '(prompt=prompt, llm=llm)\n', (2949, 2973), False, 'from langchain import PromptTemplate, LLMChain\n'), ((1219, 1283), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(256)', 'chunk_overlap': '(32)'}), '(chunk_size=256, chunk_overlap=32)\n', (1249, 1283), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1529, 1585), 'langchain.vectorstores.faiss.FAISS.from_texts', 'FAISS.from_texts', (['texts', 'embeddings'], {'metadatas': 'metadatas'}), '(texts, embeddings, metadatas=metadatas)\n', (1545, 1585), False, 'from langchain.vectorstores.faiss import FAISS\n'), ((924, 956), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (954, 956), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n'), ((2829, 2903), 
'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': "['context', 'question']"}), "(template=template, input_variables=['context', 'question'])\n", (2843, 2903), False, 'from langchain import PromptTemplate, LLMChain\n')]
import os from langchain.embeddings import OpenAIEmbeddings from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.vectorstores import Chroma class Database: def __init__(self, directory): self.embeddings = OpenAIEmbeddings() self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50) self.directory = directory self.files = os.listdir(self.directory) def list_files(self): if len(self.files) == 0: return None return self.files def save_or_add_to_transcripts(self, name, transcript): persist_directory = os.path.join(self.directory, name) if not os.path.exists(persist_directory): os.makedirs(persist_directory) transcript_file = os.path.join(persist_directory, "transcript.txt") with open(transcript_file, 'a') as f: f.write(transcript + "\n\n") def load_db(self, name): persist_directory = os.path.join(self.directory, name) transcript_file = os.path.join(persist_directory, "transcript.txt") with open(transcript_file, 'r') as f: transcript = f.read() split_docs = self.text_splitter.split_text(transcript) db = Chroma.from_texts(texts=split_docs, embedding=self.embeddings) return db
[ "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.embeddings.OpenAIEmbeddings", "langchain.vectorstores.Chroma.from_texts" ]
[((251, 269), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (267, 269), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((299, 363), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(500)', 'chunk_overlap': '(50)'}), '(chunk_size=500, chunk_overlap=50)\n', (329, 363), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((420, 446), 'os.listdir', 'os.listdir', (['self.directory'], {}), '(self.directory)\n', (430, 446), False, 'import os\n'), ((646, 680), 'os.path.join', 'os.path.join', (['self.directory', 'name'], {}), '(self.directory, name)\n', (658, 680), False, 'import os\n'), ((800, 849), 'os.path.join', 'os.path.join', (['persist_directory', '"""transcript.txt"""'], {}), "(persist_directory, 'transcript.txt')\n", (812, 849), False, 'import os\n'), ((996, 1030), 'os.path.join', 'os.path.join', (['self.directory', 'name'], {}), '(self.directory, name)\n', (1008, 1030), False, 'import os\n'), ((1057, 1106), 'os.path.join', 'os.path.join', (['persist_directory', '"""transcript.txt"""'], {}), "(persist_directory, 'transcript.txt')\n", (1069, 1106), False, 'import os\n'), ((1263, 1325), 'langchain.vectorstores.Chroma.from_texts', 'Chroma.from_texts', ([], {'texts': 'split_docs', 'embedding': 'self.embeddings'}), '(texts=split_docs, embedding=self.embeddings)\n', (1280, 1325), False, 'from langchain.vectorstores import Chroma\n'), ((696, 729), 'os.path.exists', 'os.path.exists', (['persist_directory'], {}), '(persist_directory)\n', (710, 729), False, 'import os\n'), ((743, 773), 'os.makedirs', 'os.makedirs', (['persist_directory'], {}), '(persist_directory)\n', (754, 773), False, 'import os\n')]
import os import re import time from typing import Any from dotenv import load_dotenv from langchain.callbacks.base import BaseCallbackHandler from langchain.chat_models import ChatOpenAI from langchain.schema import LLMResult from slack_bolt import App from slack_bolt.adapter.socket_mode import SocketModeHandler CHAT_UPDATE_INTERVAL_SEC = 1 load_dotenv() # ボットトークンを使ってアプリを初期化します app = App( signing_secret=os.environ["SLACK_SIGNING_SECRET"], token=os.environ["SLACK_BOT_TOKEN"], process_before_response=True, ) class SlackStreamingCallbackHandler(BaseCallbackHandler): last_send_time = time.time() message = "" def __init__(self, channel, ts): self.channel = channel self.ts = ts def on_llm_new_token(self, token: str, **kwargs) -> None: self.message += token now = time.time() if now - self.last_send_time > CHAT_UPDATE_INTERVAL_SEC: self.last_send_time = now app.client.chat_update( channel=self.channel, ts=self.ts, text=f"{self.message}..." ) def on_llm_end(self, response: LLMResult, **kwargs: Any) -> Any: app.client.chat_update(channel=self.channel, ts=self.ts, text=self.message) @app.event("app_mention") def handle_mention(event, say): channel = event["channel"] thread_ts = event["ts"] message = re.sub("<@.*>", "", event["text"]) result = say("\n\nTyping...", thread_ts=thread_ts) ts = result["ts"] callback = SlackStreamingCallbackHandler(channel=channel, ts=ts) llm = ChatOpenAI( model_name=os.environ["OPENAI_API_MODEL"], temperature=os.environ["OPENAI_API_TEMPERATURE"], streaming=True, callbacks=[callback], ) llm.predict(message) # ソケットモードハンドラーを使ってアプリを起動します if __name__ == "__main__": SocketModeHandler(app, os.environ["SLACK_APP_TOKEN"]).start()
[ "langchain.chat_models.ChatOpenAI" ]
[((347, 360), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (358, 360), False, 'from dotenv import load_dotenv\n'), ((392, 518), 'slack_bolt.App', 'App', ([], {'signing_secret': "os.environ['SLACK_SIGNING_SECRET']", 'token': "os.environ['SLACK_BOT_TOKEN']", 'process_before_response': '(True)'}), "(signing_secret=os.environ['SLACK_SIGNING_SECRET'], token=os.environ[\n 'SLACK_BOT_TOKEN'], process_before_response=True)\n", (395, 518), False, 'from slack_bolt import App\n'), ((610, 621), 'time.time', 'time.time', ([], {}), '()\n', (619, 621), False, 'import time\n'), ((1365, 1399), 're.sub', 're.sub', (['"""<@.*>"""', '""""""', "event['text']"], {}), "('<@.*>', '', event['text'])\n", (1371, 1399), False, 'import re\n'), ((1558, 1704), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': "os.environ['OPENAI_API_MODEL']", 'temperature': "os.environ['OPENAI_API_TEMPERATURE']", 'streaming': '(True)', 'callbacks': '[callback]'}), "(model_name=os.environ['OPENAI_API_MODEL'], temperature=os.\n environ['OPENAI_API_TEMPERATURE'], streaming=True, callbacks=[callback])\n", (1568, 1704), False, 'from langchain.chat_models import ChatOpenAI\n'), ((837, 848), 'time.time', 'time.time', ([], {}), '()\n', (846, 848), False, 'import time\n'), ((1826, 1879), 'slack_bolt.adapter.socket_mode.SocketModeHandler', 'SocketModeHandler', (['app', "os.environ['SLACK_APP_TOKEN']"], {}), "(app, os.environ['SLACK_APP_TOKEN'])\n", (1843, 1879), False, 'from slack_bolt.adapter.socket_mode import SocketModeHandler\n')]
import json from typing import Optional, Any from langchain.schema import AIMessage from langchain.schema.runnable import RunnableSerializable, RunnableConfig from pydantic import BaseModel class FunctionCall(BaseModel): name: str arguments: dict[str, Any] class ParseFunctionCall(RunnableSerializable[AIMessage, FunctionCall]): """ Parse OpenAI function call from an AI Message. Used in conjunction with functions bound to the LLM request. """ def invoke( self, input: AIMessage, config: Optional[RunnableConfig] = None, **kwargs: Any, ) -> FunctionCall: if isinstance(input, dict): input = AIMessage(**input) additional_kwargs = input.additional_kwargs if "function_call" in additional_kwargs: function_call = additional_kwargs["function_call"] if isinstance(function_call, str): function_call = json.loads(function_call) if isinstance(function_call["arguments"], str): function_call["arguments"] = json.loads(function_call["arguments"]) return FunctionCall(**function_call) else: raise ValueError("No function call found in input.") async def ainvoke( self, input: AIMessage, config: Optional[RunnableConfig] = None, **kwargs: Any, ) -> FunctionCall: if isinstance(input, dict): input = AIMessage(**input) additional_kwargs = input.additional_kwargs if "function_call" in additional_kwargs: function_call = additional_kwargs["function_call"] if isinstance(function_call, str): function_call = json.loads(function_call) if isinstance(function_call["arguments"], str): function_call["arguments"] = json.loads(function_call["arguments"]) return FunctionCall(**function_call) else: raise ValueError("No function call found in input.")
[ "langchain.schema.AIMessage" ]
[((682, 700), 'langchain.schema.AIMessage', 'AIMessage', ([], {}), '(**input)\n', (691, 700), False, 'from langchain.schema import AIMessage\n'), ((1460, 1478), 'langchain.schema.AIMessage', 'AIMessage', ([], {}), '(**input)\n', (1469, 1478), False, 'from langchain.schema import AIMessage\n'), ((945, 970), 'json.loads', 'json.loads', (['function_call'], {}), '(function_call)\n', (955, 970), False, 'import json\n'), ((1077, 1115), 'json.loads', 'json.loads', (["function_call['arguments']"], {}), "(function_call['arguments'])\n", (1087, 1115), False, 'import json\n'), ((1723, 1748), 'json.loads', 'json.loads', (['function_call'], {}), '(function_call)\n', (1733, 1748), False, 'import json\n'), ((1855, 1893), 'json.loads', 'json.loads', (["function_call['arguments']"], {}), "(function_call['arguments'])\n", (1865, 1893), False, 'import json\n')]
from langchain.chains.router import MultiPromptChain from langchain.chat_models import ChatOpenAI from dotenv import load_dotenv import os # A template for working with LangChain multi prompt chain. # It's a great way to let the large language model choose which prompts suits the question. # Load env files load_dotenv() openai_api_key = os.environ.get('openai_api_key') # Create the templates marketing_template = """ You are a skilled marketing professional. You have a deep understanding of market analysis, consumer behavior, branding, and digital marketing strategies. You can provide insightful recommendations and creative solutions to address various marketing-related questions. Here is a marketing-related question: {input}""" business_template = """ You are an experienced business expert. You possess knowledge in areas such as business strategy, entrepreneurship, market research, and financial analysis. You can provide practical insights and strategic advice to address various business-related questions. Here is a business-related question: {input}""" # Create prompt info prompt_infos = [ { "name": "marketing", "description": "Good for answering marketing questions", "prompt_template": marketing_template }, { "name": "business", "description": "Good for answering business-related questions", "prompt_template": business_template } ] # Create the chain llm = ChatOpenAI(openai_api_key=openai_api_key, model_name="gpt-3.5-turbo", temperature=0.3) chain = MultiPromptChain.from_prompts(llm=llm, prompt_infos=prompt_infos, verbose=True) # Example usage question = "What is the best way to finance a startup?" response = chain.run(question)
[ "langchain.chains.router.MultiPromptChain.from_prompts", "langchain.chat_models.ChatOpenAI" ]
[((310, 323), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (321, 323), False, 'from dotenv import load_dotenv\n'), ((341, 373), 'os.environ.get', 'os.environ.get', (['"""openai_api_key"""'], {}), "('openai_api_key')\n", (355, 373), False, 'import os\n'), ((1461, 1551), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'openai_api_key', 'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0.3)'}), "(openai_api_key=openai_api_key, model_name='gpt-3.5-turbo',\n temperature=0.3)\n", (1471, 1551), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1556, 1635), 'langchain.chains.router.MultiPromptChain.from_prompts', 'MultiPromptChain.from_prompts', ([], {'llm': 'llm', 'prompt_infos': 'prompt_infos', 'verbose': '(True)'}), '(llm=llm, prompt_infos=prompt_infos, verbose=True)\n', (1585, 1635), False, 'from langchain.chains.router import MultiPromptChain\n')]
from dotenv import load_dotenv from src.slackbot import SlackBot from src.handlers import create_handlers import asyncio from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler handler = StreamingStdOutCallbackHandler() # Load environment variables load_dotenv() # Load custom tools import src.custom_tools as custom_tools tools = [custom_tools.disk_usage, custom_tools.memory_usage, custom_tools.asyncArxivQueryRun(max_workers=4), custom_tools.asyncDuckDuckGoSearchRun(max_workers=4), custom_tools.WebPilot(max_workers=4)] # You can load more tools using load_tools # from langchain.agents import load_tools # tools.extend(load_tools(['ddg-search', 'arxiv', 'requests_all'])) # Create SlackBot instance bot = SlackBot(name='SlackBot', verbose=True, max_tokens=500, model_type='openai', chunk_size=500, # Chunk size for splitter chunk_overlap=50, # Chunk overlap for splitter k_similarity=5, # Numbers of chunks to return in retriever log_filename='_slackbot.log', tools=tools, ) ## LLM configuration if bot.model_type == 'llama': config = dict(gpu_layers=32, temperature=0.8, batch_size=1024, context_length=2048, threads=6, stream=True, max_new_tokens=bot.max_tokens) else: config = dict(model_name="gpt-3.5-turbo-16k", temperature=0.8, streaming=True, max_tokens=bot.max_tokens) # Initialize LLM and embeddings bot.app.logger.info("Initializing LLM and embeddings...") bot.initialize_llm(max_tokens_threads=4000, config=config, callbacks=[handler]) bot.initialize_embeddings() # Create handlers for commands /ask, /modify_bot, /bot_info and bot mentions create_handlers(bot) ### You can create new handlers for other commands as follow # @bot.app.command("/foo") # async def handle_foo(say, respond, ack, command): # await ack() # # do something.. 
# Load bot in async mode async def start(): await bot.start() if __name__ == "__main__": logger = bot.app.logger try: asyncio.run(start()) logger.info('App started.') except KeyboardInterrupt: logger.info('App stopped by user.') except Exception as e: logger.info('App stopped due to error.') logger.error(str(e)) finally: logger.info('App stopped.')
[ "langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler" ]
[((211, 243), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (241, 243), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n'), ((273, 286), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (284, 286), False, 'from dotenv import load_dotenv\n'), ((765, 943), 'src.slackbot.SlackBot', 'SlackBot', ([], {'name': '"""SlackBot"""', 'verbose': '(True)', 'max_tokens': '(500)', 'model_type': '"""openai"""', 'chunk_size': '(500)', 'chunk_overlap': '(50)', 'k_similarity': '(5)', 'log_filename': '"""_slackbot.log"""', 'tools': 'tools'}), "(name='SlackBot', verbose=True, max_tokens=500, model_type='openai',\n chunk_size=500, chunk_overlap=50, k_similarity=5, log_filename=\n '_slackbot.log', tools=tools)\n", (773, 943), False, 'from src.slackbot import SlackBot\n'), ((1764, 1784), 'src.handlers.create_handlers', 'create_handlers', (['bot'], {}), '(bot)\n', (1779, 1784), False, 'from src.handlers import create_handlers\n'), ((419, 465), 'src.custom_tools.asyncArxivQueryRun', 'custom_tools.asyncArxivQueryRun', ([], {'max_workers': '(4)'}), '(max_workers=4)\n', (450, 465), True, 'import src.custom_tools as custom_tools\n'), ((476, 528), 'src.custom_tools.asyncDuckDuckGoSearchRun', 'custom_tools.asyncDuckDuckGoSearchRun', ([], {'max_workers': '(4)'}), '(max_workers=4)\n', (513, 528), True, 'import src.custom_tools as custom_tools\n'), ((539, 575), 'src.custom_tools.WebPilot', 'custom_tools.WebPilot', ([], {'max_workers': '(4)'}), '(max_workers=4)\n', (560, 575), True, 'import src.custom_tools as custom_tools\n')]
#!/usr/bin/env python """A more complex example that shows how to configure index name at run time.""" from typing import Any, Iterable, List, Optional, Type from fastapi import FastAPI from langchain.embeddings import OpenAIEmbeddings from langchain.schema import Document from langchain.schema.embeddings import Embeddings from langchain.schema.retriever import BaseRetriever from langchain.schema.runnable import ( ConfigurableFieldSingleOption, RunnableConfig, RunnableSerializable, ) from langchain.schema.vectorstore import VST from langchain.vectorstores import FAISS, VectorStore from langserve import add_routes from langserve.pydantic_v1 import BaseModel, Field vectorstore1 = FAISS.from_texts( ["cats like fish", "dogs like sticks"], embedding=OpenAIEmbeddings() ) vectorstore2 = FAISS.from_texts(["x_n+1=a * xn * (1-xn)"], embedding=OpenAIEmbeddings()) app = FastAPI( title="LangChain Server", version="1.0", description="Spin up a simple api server using Langchain's Runnable interfaces", ) class UnderlyingVectorStore(VectorStore): """This is a fake vectorstore for demo purposes.""" def __init__(self, collection_name: str) -> None: """Fake vectorstore that has a collection name.""" self.collection_name = collection_name def as_retriever(self) -> BaseRetriever: if self.collection_name == "index1": return vectorstore1.as_retriever() elif self.collection_name == "index2": return vectorstore2.as_retriever() else: raise NotImplementedError( f"No retriever for collection {self.collection_name}" ) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: raise NotImplementedError() @classmethod def from_texts( cls: Type[VST], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> VST: raise NotImplementedError() def similarity_search( self, embedding: List[float], k: int = 4, **kwargs: Any ) -> List[Document]: raise NotImplementedError() class 
ConfigurableRetriever(RunnableSerializable[str, List[Document]]): """Create a custom retriever that can be configured by the user. This is an example of how to create a custom runnable that can be configured to use a different collection name at run time. Configuration involves instantiating a VectorStore with a collection name. at run time, so the underlying vectorstore should be *cheap* to instantiate. For example, it should not be making any network requests at instantiation time. Make sure that the vectorstore you use meets this criteria. """ collection_name: str def invoke( self, input: str, config: Optional[RunnableConfig] = None ) -> List[Document]: """Invoke the retriever.""" vectorstore = UnderlyingVectorStore(self.collection_name) retriever = vectorstore.as_retriever() return retriever.invoke(input, config=config) configurable_collection_name = ConfigurableRetriever( collection_name="index1" ).configurable_fields( collection_name=ConfigurableFieldSingleOption( id="collection_name", name="Collection Name", description="The name of the collection to use for the retriever.", options={ "Index 1": "index1", "Index 2": "index2", }, default="Index 1", ) ) class Request(BaseModel): __root__: str = Field(default="cat", description="Search query") add_routes(app, configurable_collection_name.with_types(input_type=Request)) if __name__ == "__main__": import uvicorn uvicorn.run(app, host="localhost", port=8000)
[ "langchain.embeddings.OpenAIEmbeddings", "langchain.schema.runnable.ConfigurableFieldSingleOption" ]
[((893, 1027), 'fastapi.FastAPI', 'FastAPI', ([], {'title': '"""LangChain Server"""', 'version': '"""1.0"""', 'description': '"""Spin up a simple api server using Langchain\'s Runnable interfaces"""'}), '(title=\'LangChain Server\', version=\'1.0\', description=\n "Spin up a simple api server using Langchain\'s Runnable interfaces")\n', (900, 1027), False, 'from fastapi import FastAPI\n'), ((3661, 3709), 'langserve.pydantic_v1.Field', 'Field', ([], {'default': '"""cat"""', 'description': '"""Search query"""'}), "(default='cat', description='Search query')\n", (3666, 3709), False, 'from langserve.pydantic_v1 import BaseModel, Field\n'), ((3841, 3886), 'uvicorn.run', 'uvicorn.run', (['app'], {'host': '"""localhost"""', 'port': '(8000)'}), "(app, host='localhost', port=8000)\n", (3852, 3886), False, 'import uvicorn\n'), ((774, 792), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (790, 792), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((865, 883), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (881, 883), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((3314, 3536), 'langchain.schema.runnable.ConfigurableFieldSingleOption', 'ConfigurableFieldSingleOption', ([], {'id': '"""collection_name"""', 'name': '"""Collection Name"""', 'description': '"""The name of the collection to use for the retriever."""', 'options': "{'Index 1': 'index1', 'Index 2': 'index2'}", 'default': '"""Index 1"""'}), "(id='collection_name', name='Collection Name',\n description='The name of the collection to use for the retriever.',\n options={'Index 1': 'index1', 'Index 2': 'index2'}, default='Index 1')\n", (3343, 3536), False, 'from langchain.schema.runnable import ConfigurableFieldSingleOption, RunnableConfig, RunnableSerializable\n')]
from itertools import chain import pandas as pd from datasets import Dataset from joblib import Parallel, delayed from langchain.text_splitter import RecursiveCharacterTextSplitter from tqdm import tqdm def sl_hf_dataset_for_tokenizer( sl, sl_dataset_name, tokenizer, max_length, margin=192, min_length=7 ): """ Create a HuggingFace dataset from a SpeakLeash dataset. params: sl: SpeakLeash object params: sl_dataset_name: SpeakLeash dataset name params: tokenizer: HuggingFace tokenizer params: max_length: maximum length of the tokenized text params: margin: margin subtract from the max_length (this helps to avoid RecursiveCharacterTextSplitter returning too many too long chunks) params: min_length: minimum length of the tokenized text returns: HuggingFace dataset """ corpus = sl.get(sl_dataset_name) # this is used only for length calculation def token_len(text): return len(tokenizer.encode(text, max_length=None, truncation=False)) text_splitter = RecursiveCharacterTextSplitter( chunk_size=max_length - margin, chunk_overlap=0, length_function=token_len, ) dataset = Parallel(n_jobs=-1)( delayed(text_splitter.split_text)(document) for document in tqdm(corpus.data, total=corpus.manifest["stats"]["documents"]) ) dataset = list(chain.from_iterable(dataset)) df = pd.DataFrame(dataset, columns=["text"]) hf_dataset = Dataset.from_pandas(df) hf_dataset = hf_dataset.map( lambda examples: tokenizer(examples["text"], truncation=True, max_length=max_length), batched=True, ) # Filter out samples that have input_ids exceeding max_length hf_dataset = hf_dataset.filter( lambda sample: min_length <= len(sample["input_ids"]) < max_length ) return hf_dataset
[ "langchain.text_splitter.RecursiveCharacterTextSplitter" ]
[((1048, 1158), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(max_length - margin)', 'chunk_overlap': '(0)', 'length_function': 'token_len'}), '(chunk_size=max_length - margin,\n chunk_overlap=0, length_function=token_len)\n', (1078, 1158), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1426, 1465), 'pandas.DataFrame', 'pd.DataFrame', (['dataset'], {'columns': "['text']"}), "(dataset, columns=['text'])\n", (1438, 1465), True, 'import pandas as pd\n'), ((1483, 1506), 'datasets.Dataset.from_pandas', 'Dataset.from_pandas', (['df'], {}), '(df)\n', (1502, 1506), False, 'from datasets import Dataset\n'), ((1201, 1220), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (1209, 1220), False, 'from joblib import Parallel, delayed\n'), ((1386, 1414), 'itertools.chain.from_iterable', 'chain.from_iterable', (['dataset'], {}), '(dataset)\n', (1405, 1414), False, 'from itertools import chain\n'), ((1230, 1263), 'joblib.delayed', 'delayed', (['text_splitter.split_text'], {}), '(text_splitter.split_text)\n', (1237, 1263), False, 'from joblib import Parallel, delayed\n'), ((1298, 1360), 'tqdm.tqdm', 'tqdm', (['corpus.data'], {'total': "corpus.manifest['stats']['documents']"}), "(corpus.data, total=corpus.manifest['stats']['documents'])\n", (1302, 1360), False, 'from tqdm import tqdm\n')]
from contextlib import contextmanager import uuid import os import tiktoken from . import S2_tools as scholar import csv import sys import requests # pdf loader from langchain.document_loaders import OnlinePDFLoader ## paper questioning tools from llama_index import Document from llama_index.vector_stores import PineconeVectorStore from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext from llama_index.embeddings.openai import OpenAIEmbedding def PaperSearchAndDownload(query): # make new workspace if not os.path.exists( os.path.join(os.getcwd(),'workspaces') ): os.mkdir(os.path.join(os.getcwd(),'workspaces')) workspace_dir_name = os.path.join(os.getcwd(),'workspaces',query.split()[0] + '_'+ str(uuid.uuid4().hex)) os.mkdir(workspace_dir_name) os.mkdir(os.path.join(workspace_dir_name,'results')) os.mkdir(os.path.join(workspace_dir_name,'refy_suggestions')) os.environ['workspace'] = workspace_dir_name # 1) search papers print(' 1) Searching base papers') papers = scholar.find_paper_from_query(query, result_limit=10) if len(papers == 0): papers = scholar.find_paper_from_query(query, result_limit=50) scholar.update_dataframe(incomplete=papers, dest=os.path.join(workspace_dir_name, 'results','papers.csv')) delete_duplicates_from_csv(csv_file=os.path.join(workspace_dir_name, 'results','papers.csv')) # 2) Cross-reference reccomendation system: # a paper is reccomended if and only if it's related to more than one paper print('\n\n 2) Expanding with Scholar reccomendations') counts = {} candidates = {} for paper in papers: guesses = scholar.find_recommendations(paper) for guess in guesses: if not guess['isOpenAccess']: continue candidates[guess['title']] = guess if guess['title'] not in counts.keys(): counts[guess['title']] = 1 else: counts[guess['title']] += 1 # reccomend only papers that appeared more than once reccomends = [] for key in counts: if counts[key]>1: reccomends.append(candidates[key]) print(f'found {len(reccomends)} additional papers') # 
update the csv scholar.update_dataframe(incomplete= reccomends, dest=os.path.join(workspace_dir_name, 'results','papers.csv')) delete_duplicates_from_csv(csv_file=os.path.join(workspace_dir_name, 'results','papers.csv')) # download the papers (1/2) print('downloading papers (1/2)') with open(os.path.join(workspace_dir_name,'results','papers.csv'), 'r',encoding='utf-8') as fp: csvfile = csv.DictReader(fp) scholar.download_pdf_from_id(" ".join( row['paperId'] for row in csvfile), workspace_dir_name) scholar.write_bib_file(csv_file=os.path.join(workspace_dir_name,'results','papers.csv'), bib_file=os.path.join(workspace_dir_name,'results','papers.bib')) # expand further with refy reccomendendation system print('\n\n 3) Expanding with Refy reccomendendation system') print('this might take a while...') scholar.refy_reccomend(bib_path=os.path.join(workspace_dir_name,'results','papers.bib')) with open(os.path.join(workspace_dir_name, 'refy_suggestions', 'test.csv'), 'r',encoding='utf-8') as fp: csvfile = csv.DictReader(fp) for row in csvfile: title = scholar.replace_non_alphanumeric(row['title']) title = title.replace(" ","_") save_path = os.path.join(workspace_dir_name,'refy_suggestions',(title+'.pdf')) try: download_paper(url=row['url'], save_path=save_path) except: print(f'couldn t download {row}') return f'{os.path.join(os.getcwd(), workspace_dir_name)}' import urllib def download_paper(url, save_path=f"{uuid.uuid4().hex}.pdf"): success_string = f"paper saved successfully at {os.path.join(os.path.abspath(save_path))}" if url.endswith('.pdf'): urllib.request.urlretrieve(url, save_path) return success_string if 'doi' in url: doi = paper_id = "/".join(url.split("/")[-2:]) # Construct the Crossref API URL print(doi) doi_url = f"https://doi.org/{doi}" # Send a GET request to the doi.org URL response = requests.get(doi_url, allow_redirects=True) # Check if the request was successful if response.status_code == 200: # Extract the final URL after redirection url = response.url 
if 'arxiv' in url: # URL del paper su arXiv # Ottieni l'ID del paper dall'URL paper_id = url.split("/")[-1] # Costruisci l'URL di download del paper pdf_url = f"http://arxiv.org/pdf/{paper_id}.pdf" # Scarica il paper in formato PDF urllib.request.urlretrieve(pdf_url, save_path) return success_string else: if '/full' in url: urllib.request.urlretrieve(url.replace('/full','/pdf')) return success_string if 'plos.org' in url: final_url = url.replace('article?', 'article/file?') urllib.request.urlretrieve(final_url, save_path) return success_string return f'\nfailed to download {url}' def download_bibtex_library(csv_path): with open(csv_path, 'r',encoding='utf-8') as fp: csvfile = csv.DictReader(fp) for row in csvfile: title = scholar.replace_non_alphanumeric(row['title']) title = title.replace(" ","-") save_path = os.path.join(os.path.join(csv_path, '..', title+'.pdf')) try: download_paper(url=row['url'], save_path=save_path) except: try: download_paper(url=row['url']+'.pdf', save_path=save_path) except: print(f'couldn t download {row}') def generate_chunks(text, CHUNK_LENGTH = 4000): enc = tiktoken.encoding_for_model("gpt-4") tokens = enc.encode(text) token_chunks = [tokens[i:i + CHUNK_LENGTH] for i in range(0, len(tokens), CHUNK_LENGTH)] word_chunks = [enc.decode(chunk) for chunk in token_chunks] return word_chunks from langchain.vectorstores import Chroma, Pinecone from langchain.embeddings.openai import OpenAIEmbeddings import pinecone import langid import time # def process_pdf_folder(folder_path): # if not os.path.exists(folder_path): # return 'the folder does not exist, check your spelling' # for item in os.listdir(folder_path): # if not item.endswith('.pdf'):continue # with open(os.path.join(folder_path,'SUMMARY.txt'), 'a', encoding='UTF-8') as write_file: # write_file.write(item) # write_file.write("\n\n\n") # txt = summarize_pdf(item, model='Vicuna') # try: # write_file.write(txt) # except: # print(txt) # with open(os.path.join(folder_path,'SUMMARY.txt'), 
'r', encoding='UTF-8') as read_file: # return read_file.read() # # def summarize_pdf(pdf_path, model= None): # text = readPDF(pdf_path) # # according to the TLDR Model, consider smaller chunks # text_chunks = generate_chunks(text, 700) # if model is not None: # summarizer = LocalSearchEngine(tldr_model=model) # summary='' # for chunk in text_chunks: # summary += summarizer.tldr(chunk) # return summary def get_result_path(path, exclude = []): for item in os.listdir(path): if item == 'papers.csv': return os.path.join(path, item) if os.path.isdir(os.path.join(path, item)) and item not in exclude: res = get_result_path(os.path.join(path, item)) if res: return res return def get_workspace_titles(workspace_name): csv_file_path = get_result_path(workspace_name) papers_available = [] with open(csv_file_path, 'r', encoding='utf-8') as file: csv_file = csv.DictReader(file) for row in csv_file: papers_available.append(row['title']) return papers_available import re def same_title(title1, title2): try: title1 = re.sub(r'[^a-zA-Z]', ' ', title1) title2 = re.sub(r'[^a-zA-Z]', ' ', title2) except: return False words1 = set(title1.lower().split()) words2 = set(title2.lower().split()) return words1 == words2 or words1 <= words2 or words1 >= words2 def glimpse_pdf(title): # find papers.csv in workspace for workspace_name in os.listdir('workspaces'): csv_file_path = get_result_path(workspace_name) if csv_file_path is None: return 'no paper found' with open(csv_file_path, 'r', encoding='utf-8') as file: csv_file = csv.DictReader(file) for row in csv_file: if same_title(row['title'], title): return f"{row['title']}, paperId: {row['paperId']}, summary: {row['abstract']}" return f'\nno paper found with title {title}' def count_tokens(text): enc = tiktoken.encoding_for_model("gpt-4") tokens = enc.encode(text) return len(tokens) def readPDF(pdf_path): loader = OnlinePDFLoader(pdf_path) data = loader.load() text_content = '' for page in data: formatted_content = 
page.page_content.replace('\n\n', ' ') text_content+=formatted_content return text_content def get_pdf_path(dir, exclude=[]): paths = [] for item in os.listdir(dir): itempath = os.path.join(dir,item) if item.endswith('.pdf'): paths.append(itempath) if os.path.isdir(itempath)and item not in exclude: subpaths = get_pdf_path(itempath) for i in subpaths: paths.append(i) return paths def delete_duplicates_from_csv(csv_file): print('verifying duplicates...') to_delete = [] def delete_csv_row_by_title(csv_file, title): # Read the CSV file and store rows in a list with open(csv_file, 'r',encoding='UTF-8') as file: reader = csv.DictReader(file) rows = list(reader) # Find the row index with the matching title row_index = None for index, row in enumerate(rows): if row['title'] == title: row_index = index break # If no matching title is found, return if row_index is None: print(f"No row with title '{title}' found.") return # Remove the row from the list del rows[row_index] # Write the updated rows back to the CSV file with open(csv_file, 'w', newline='',encoding='UTF-8') as file: fieldnames = reader.fieldnames writer = csv.DictWriter(file, fieldnames=fieldnames) writer.writeheader() writer.writerows(rows) with open(csv_file, 'r', encoding='UTF-8') as file: DELETED = 0 reader = csv.DictReader(file) rows = list(reader) entries = set() for row in rows: if row['title']=='' or row['title'] is None: continue if row['title'] not in entries:entries.add(row['title']) else: DELETED+=1 to_delete.append(row['title']) for title in to_delete: delete_csv_row_by_title(csv_file, title=title) print(f"Deleted {DELETED} duplicates") return def update_workspace_dataframe(workspace, verbose = True): ADDED = 0 # find results.csv csv_path = get_result_path(workspace) # get titles in csv titles = get_workspace_titles(workspace) # get local papers path paths = get_pdf_path(workspace, exclude='refy_suggestions') # adding new to csv: for path in paths: exists = False # extract the title from the local 
paper title = scholar.extract_title(path) for t in titles: if same_title(t,title): exists = True # add it to dataframe if it was not found on the DF if not exists: if verbose: print(f"\nnew paper detected: {title}") # find it with online paper = scholar.find_paper_online(path) if paper : if verbose: print(f"\t---> best match found online: {paper['title']} " ) for t in titles: if same_title(paper['title'], title): if verbose: print(f"\t this paper is already present in the dataframe. skipping") else: if verbose: print(path, '-x-> no match found') continue with open(csv_path, 'a', encoding='utf-8') as fp: areYouSure = True for t in titles: if same_title(t,paper['title']): areYouSure =False if not areYouSure: if verbose: print(f"double check revealed that the paper is already in the dataframe. Skipping") continue if verbose: print(f"\t---> adding {paper['title']}") ADDED +=1 paper_authors = paper.get('authors', []) journal_data = {} if 'journal' in paper: journal_data = paper.get('journal',[]) if journal_data is not None: if 'name' not in journal_data: journal_data['name'] = '' if 'pages' not in journal_data: journal_data['pages'] = '' if paper.get('tldr',[]) != []:tldr = paper['tldr']['text'] elif paper.get('summary',[]) != []:tldr = paper['summary'] elif 'abstract' in paper:tldr = paper['abstract'] else: tldr = 'No summary available' if 'year' in paper: year = paper['year'] elif 'updated' in paper:year = paper['updated'] else: year = '' if 'citationStyles' in paper: if 'bibtex' in paper['citationStyles']: citStyle = paper['citationStyles']['bibtex'] else: citStyle = paper['citationStyles'][0] else: citStyle = '' csvfile = csv.DictWriter(fp, ['paperId', 'title', 'first_author', 'year', 'abstract','tldr','bibtex','influentialCitationCount','venue','journal','pages']) try: csvfile.writerow({ 'title': paper['title'], 'first_author': paper_authors[0]['name'] if paper_authors else '', 'year': year, 'abstract': paper['abstract'] if 'abstract' in paper else '', 'paperId': 
paper['paperId'] if 'paperId' in paper else '', 'tldr':tldr, 'bibtex':citStyle, 'influentialCitationCount': paper['influentialCitationCount'] if 'influentialCitationCount' in paper else '0', 'venue':paper['venue'] if 'venue' in paper else '', 'journal':journal_data['name'] if journal_data is not None else '', 'pages':journal_data['pages'] if journal_data is not None else '', }) except Exception as e: if verbose: print('could not add ', title, '\n',e) # delete dupes if present if verbose: print(f"\n\nCSV UPDATE: Added {ADDED} new papers") # clean form dupes delete_duplicates_from_csv(csv_path) # update bib scholar.write_bib_file(csv_path) return def load_workspace(folderdir): docs =[] for item in os.listdir(folderdir): if item.endswith('.pdf'): print(f' > loading {item}') with suppress_stdout(): content = readPDF(os.path.join(folderdir, item)) docs.append(Document( text = content, doc_id = uuid.uuid4().hex )) if item =='.'or item =='..':continue if os.path.isdir( os.path.join(folderdir,item) ): sub_docs = load_workspace(os.path.join(folderdir,item)) for doc in sub_docs: docs.append(doc) return docs # List paths of all pdf files in a folder def list_workspace_elements(folderdir): docs =[] for item in os.listdir(folderdir): if item.endswith('.pdf'): docs.append(rf"{os.path.join(folderdir,item)}") if item =='.'or item =='..':continue if os.path.isdir( os.path.join(folderdir,item) ): sub_docs = list_workspace_elements(os.path.join(folderdir,item)) for doc in sub_docs: docs.append(doc) return docs def llama_query_engine(docs:list, pinecone_index_name:str): pinecone.init( api_key= os.environ['PINECONE_API_KEY'], environment= os.environ['PINECONE_API_ENV'] ) # Find the pinecone index if pinecone_index_name not in pinecone.list_indexes(): # we create a new index pinecone.create_index( name=pinecone_index_name, metric='dotproduct', dimension=1536 # 1536 dim of text-embedding-ada-002 ) index = pinecone.Index(pinecone_index_name) # init it vector_store = 
PineconeVectorStore(pinecone_index=index) time.sleep(1) # setup our storage (vector db) storage_context = StorageContext.from_defaults( vector_store=vector_store ) embed_model = OpenAIEmbedding(model='text-embedding-ada-002', embed_batch_size=100) service_context = ServiceContext.from_defaults(embed_model=embed_model) # populate the vector store LamaIndex = GPTVectorStoreIndex.from_documents( docs, storage_context=storage_context, service_context=service_context ) print('PINECONE Vector Index initialized:\n',index.describe_index_stats()) # init the query engine query_engine = LamaIndex.as_query_engine() return query_engine, LamaIndex @contextmanager def suppress_stdout(): with open(os.devnull, "w") as devnull: old_stdout = sys.stdout sys.stdout = devnull try: yield finally: sys.stdout = old_stdout
[ "langchain.document_loaders.OnlinePDFLoader" ]
[((768, 796), 'os.mkdir', 'os.mkdir', (['workspace_dir_name'], {}), '(workspace_dir_name)\n', (776, 796), False, 'import os\n'), ((5950, 5986), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""gpt-4"""'], {}), "('gpt-4')\n", (5977, 5986), False, 'import tiktoken\n'), ((7532, 7548), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (7542, 7548), False, 'import os\n'), ((8565, 8589), 'os.listdir', 'os.listdir', (['"""workspaces"""'], {}), "('workspaces')\n", (8575, 8589), False, 'import os\n'), ((9082, 9118), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""gpt-4"""'], {}), "('gpt-4')\n", (9109, 9118), False, 'import tiktoken\n'), ((9209, 9234), 'langchain.document_loaders.OnlinePDFLoader', 'OnlinePDFLoader', (['pdf_path'], {}), '(pdf_path)\n', (9224, 9234), False, 'from langchain.document_loaders import OnlinePDFLoader\n'), ((9509, 9524), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (9519, 9524), False, 'import os\n'), ((15782, 15803), 'os.listdir', 'os.listdir', (['folderdir'], {}), '(folderdir)\n', (15792, 15803), False, 'import os\n'), ((16505, 16526), 'os.listdir', 'os.listdir', (['folderdir'], {}), '(folderdir)\n', (16515, 16526), False, 'import os\n'), ((16958, 17060), 'pinecone.init', 'pinecone.init', ([], {'api_key': "os.environ['PINECONE_API_KEY']", 'environment': "os.environ['PINECONE_API_ENV']"}), "(api_key=os.environ['PINECONE_API_KEY'], environment=os.\n environ['PINECONE_API_ENV'])\n", (16971, 17060), False, 'import pinecone\n'), ((17396, 17431), 'pinecone.Index', 'pinecone.Index', (['pinecone_index_name'], {}), '(pinecone_index_name)\n', (17410, 17431), False, 'import pinecone\n'), ((17470, 17511), 'llama_index.vector_stores.PineconeVectorStore', 'PineconeVectorStore', ([], {'pinecone_index': 'index'}), '(pinecone_index=index)\n', (17489, 17511), False, 'from llama_index.vector_stores import PineconeVectorStore\n'), ((17516, 17529), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (17526, 17529), 
False, 'import time\n'), ((17589, 17644), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (17617, 17644), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext\n'), ((17678, 17747), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'model': '"""text-embedding-ada-002"""', 'embed_batch_size': '(100)'}), "(model='text-embedding-ada-002', embed_batch_size=100)\n", (17693, 17747), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((17770, 17823), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model'}), '(embed_model=embed_model)\n', (17798, 17823), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext\n'), ((17878, 17988), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['docs'], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(docs, storage_context=storage_context,\n service_context=service_context)\n', (17912, 17988), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext\n'), ((692, 703), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (701, 703), False, 'import os\n'), ((810, 853), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""'], {}), "(workspace_dir_name, 'results')\n", (822, 853), False, 'import os\n'), ((867, 919), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""refy_suggestions"""'], {}), "(workspace_dir_name, 'refy_suggestions')\n", (879, 919), False, 'import os\n'), ((2613, 2631), 'csv.DictReader', 'csv.DictReader', (['fp'], {}), '(fp)\n', (2627, 2631), False, 'import csv\n'), ((3285, 3303), 'csv.DictReader', 'csv.DictReader', (['fp'], {}), '(fp)\n', (3299, 3303), False, 'import csv\n'), ((3963, 4005), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', 
(['url', 'save_path'], {}), '(url, save_path)\n', (3989, 4005), False, 'import urllib\n'), ((4283, 4326), 'requests.get', 'requests.get', (['doi_url'], {'allow_redirects': '(True)'}), '(doi_url, allow_redirects=True)\n', (4295, 4326), False, 'import requests\n'), ((4795, 4841), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['pdf_url', 'save_path'], {}), '(pdf_url, save_path)\n', (4821, 4841), False, 'import urllib\n'), ((5368, 5386), 'csv.DictReader', 'csv.DictReader', (['fp'], {}), '(fp)\n', (5382, 5386), False, 'import csv\n'), ((8008, 8028), 'csv.DictReader', 'csv.DictReader', (['file'], {}), '(file)\n', (8022, 8028), False, 'import csv\n'), ((8205, 8237), 're.sub', 're.sub', (['"""[^a-zA-Z]"""', '""" """', 'title1'], {}), "('[^a-zA-Z]', ' ', title1)\n", (8211, 8237), False, 'import re\n'), ((8256, 8288), 're.sub', 're.sub', (['"""[^a-zA-Z]"""', '""" """', 'title2'], {}), "('[^a-zA-Z]', ' ', title2)\n", (8262, 8288), False, 'import re\n'), ((9545, 9568), 'os.path.join', 'os.path.join', (['dir', 'item'], {}), '(dir, item)\n', (9557, 9568), False, 'import os\n'), ((10975, 10995), 'csv.DictReader', 'csv.DictReader', (['file'], {}), '(file)\n', (10989, 10995), False, 'import csv\n'), ((17149, 17172), 'pinecone.list_indexes', 'pinecone.list_indexes', ([], {}), '()\n', (17170, 17172), False, 'import pinecone\n'), ((17214, 17302), 'pinecone.create_index', 'pinecone.create_index', ([], {'name': 'pinecone_index_name', 'metric': '"""dotproduct"""', 'dimension': '(1536)'}), "(name=pinecone_index_name, metric='dotproduct',\n dimension=1536)\n", (17235, 17302), False, 'import pinecone\n'), ((1250, 1307), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.csv"""'], {}), "(workspace_dir_name, 'results', 'papers.csv')\n", (1262, 1307), False, 'import os\n'), ((1348, 1405), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.csv"""'], {}), "(workspace_dir_name, 'results', 'papers.csv')\n", (1360, 
1405), False, 'import os\n'), ((2268, 2325), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.csv"""'], {}), "(workspace_dir_name, 'results', 'papers.csv')\n", (2280, 2325), False, 'import os\n'), ((2366, 2423), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.csv"""'], {}), "(workspace_dir_name, 'results', 'papers.csv')\n", (2378, 2423), False, 'import os\n'), ((2509, 2566), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.csv"""'], {}), "(workspace_dir_name, 'results', 'papers.csv')\n", (2521, 2566), False, 'import os\n'), ((2778, 2835), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.csv"""'], {}), "(workspace_dir_name, 'results', 'papers.csv')\n", (2790, 2835), False, 'import os\n'), ((2844, 2901), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.bib"""'], {}), "(workspace_dir_name, 'results', 'papers.bib')\n", (2856, 2901), False, 'import os\n'), ((3100, 3157), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.bib"""'], {}), "(workspace_dir_name, 'results', 'papers.bib')\n", (3112, 3157), False, 'import os\n'), ((3172, 3236), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""refy_suggestions"""', '"""test.csv"""'], {}), "(workspace_dir_name, 'refy_suggestions', 'test.csv')\n", (3184, 3236), False, 'import os\n'), ((3468, 3536), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""refy_suggestions"""', "(title + '.pdf')"], {}), "(workspace_dir_name, 'refy_suggestions', title + '.pdf')\n", (3480, 3536), False, 'import os\n'), ((5119, 5167), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['final_url', 'save_path'], {}), '(final_url, save_path)\n', (5145, 5167), False, 'import urllib\n'), ((7602, 7626), 'os.path.join', 'os.path.join', (['path', 'item'], {}), '(path, item)\n', (7614, 7626), False, 'import os\n'), ((8802, 8822), 
'csv.DictReader', 'csv.DictReader', (['file'], {}), '(file)\n', (8816, 8822), False, 'import csv\n'), ((9636, 9659), 'os.path.isdir', 'os.path.isdir', (['itempath'], {}), '(itempath)\n', (9649, 9659), False, 'import os\n'), ((10078, 10098), 'csv.DictReader', 'csv.DictReader', (['file'], {}), '(file)\n', (10092, 10098), False, 'import csv\n'), ((10760, 10803), 'csv.DictWriter', 'csv.DictWriter', (['file'], {'fieldnames': 'fieldnames'}), '(file, fieldnames=fieldnames)\n', (10774, 10803), False, 'import csv\n'), ((16201, 16230), 'os.path.join', 'os.path.join', (['folderdir', 'item'], {}), '(folderdir, item)\n', (16213, 16230), False, 'import os\n'), ((16702, 16731), 'os.path.join', 'os.path.join', (['folderdir', 'item'], {}), '(folderdir, item)\n', (16714, 16731), False, 'import os\n'), ((576, 587), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (585, 587), False, 'import os\n'), ((627, 638), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (636, 638), False, 'import os\n'), ((3718, 3729), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3727, 3729), False, 'import os\n'), ((3806, 3818), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3816, 3818), False, 'import uuid\n'), ((3896, 3922), 'os.path.abspath', 'os.path.abspath', (['save_path'], {}), '(save_path)\n', (3911, 3922), False, 'import os\n'), ((5564, 5608), 'os.path.join', 'os.path.join', (['csv_path', '""".."""', "(title + '.pdf')"], {}), "(csv_path, '..', title + '.pdf')\n", (5576, 5608), False, 'import os\n'), ((7652, 7676), 'os.path.join', 'os.path.join', (['path', 'item'], {}), '(path, item)\n', (7664, 7676), False, 'import os\n'), ((7738, 7762), 'os.path.join', 'os.path.join', (['path', 'item'], {}), '(path, item)\n', (7750, 7762), False, 'import os\n'), ((14294, 14449), 'csv.DictWriter', 'csv.DictWriter', (['fp', "['paperId', 'title', 'first_author', 'year', 'abstract', 'tldr', 'bibtex',\n 'influentialCitationCount', 'venue', 'journal', 'pages']"], {}), "(fp, ['paperId', 'title', 'first_author', 'year', 'abstract',\n 
'tldr', 'bibtex', 'influentialCitationCount', 'venue', 'journal', 'pages'])\n", (14308, 14449), False, 'import csv\n'), ((16271, 16300), 'os.path.join', 'os.path.join', (['folderdir', 'item'], {}), '(folderdir, item)\n', (16283, 16300), False, 'import os\n'), ((16781, 16810), 'os.path.join', 'os.path.join', (['folderdir', 'item'], {}), '(folderdir, item)\n', (16793, 16810), False, 'import os\n'), ((745, 757), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (755, 757), False, 'import uuid\n'), ((15951, 15980), 'os.path.join', 'os.path.join', (['folderdir', 'item'], {}), '(folderdir, item)\n', (15963, 15980), False, 'import os\n'), ((16590, 16619), 'os.path.join', 'os.path.join', (['folderdir', 'item'], {}), '(folderdir, item)\n', (16602, 16619), False, 'import os\n'), ((16085, 16097), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (16095, 16097), False, 'import uuid\n')]
import streamlit as st from langchain.prompts import PromptTemplate chat_template = PromptTemplate( input_variables=['transcript','summary','chat_history','user_message', 'sentiment_report'], template=''' You are an AI chatbot intended to discuss about the user's audio transcription. \nTRANSCRIPT: "{transcript}" \nTRANSCIRPT SUMMARY: "{summary}" \nTRANSCRIPT SENTIMENT REPORT: "{sentiment_report}" \nCHAT HISTORY: {chat_history} \nUSER MESSAGE: "{user_message}" \nAI RESPONSE HERE: ''' ) sentiment_prompt = PromptTemplate( input_variables=['transcript','summary'], template=''' Return a single word sentiment of either ['Positive','Negative' or 'Neutral'] from this transcript and summary. After that single word sentiment, add a comma, then return a sentiment report, analyzing transcript sentiment. \nTRANSCRIPT: {transcript} \nTRANSCRIPT SUMMARY: {summary} \nSENTIMENT LABEL HERE ('Positive','Negative', or 'Neutral') <comma-seperated> REPORT HERE: ''' ) fact_check_prompt = ''' Fact-check this transcript for factual or logical inacurracies or inconsistencies \nWrite a report on the factuality / logic of the transcirpt \nTRANSCRIPT: {} \nTRANSCRIPT SUMMARY: {} \nAI FACT CHECK RESPONSE HERE: '''
[ "langchain.prompts.PromptTemplate" ]
[((88, 562), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['transcript', 'summary', 'chat_history', 'user_message', 'sentiment_report']", 'template': '"""\n You are an AI chatbot intended to discuss about the user\'s audio transcription.\n \nTRANSCRIPT: "{transcript}"\n \nTRANSCIRPT SUMMARY: "{summary}"\n \nTRANSCRIPT SENTIMENT REPORT: "{sentiment_report}"\n \nCHAT HISTORY: {chat_history}\n \nUSER MESSAGE: "{user_message}"\n \nAI RESPONSE HERE:\n """'}), '(input_variables=[\'transcript\', \'summary\', \'chat_history\',\n \'user_message\', \'sentiment_report\'], template=\n """\n You are an AI chatbot intended to discuss about the user\'s audio transcription.\n \nTRANSCRIPT: "{transcript}"\n \nTRANSCIRPT SUMMARY: "{summary}"\n \nTRANSCRIPT SENTIMENT REPORT: "{sentiment_report}"\n \nCHAT HISTORY: {chat_history}\n \nUSER MESSAGE: "{user_message}"\n \nAI RESPONSE HERE:\n """\n )\n', (102, 562), False, 'from langchain.prompts import PromptTemplate\n'), ((595, 1095), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['transcript', 'summary']", 'template': '"""\n Return a single word sentiment of either [\'Positive\',\'Negative\' or \'Neutral\'] from this transcript and summary.\n After that single word sentiment, add a comma, then return a sentiment report, analyzing transcript sentiment.\n \nTRANSCRIPT: {transcript}\n \nTRANSCRIPT SUMMARY: {summary}\n \nSENTIMENT LABEL HERE (\'Positive\',\'Negative\', or \'Neutral\') <comma-seperated> REPORT HERE:\n """'}), '(input_variables=[\'transcript\', \'summary\'], template=\n """\n Return a single word sentiment of either [\'Positive\',\'Negative\' or \'Neutral\'] from this transcript and summary.\n After that single word sentiment, add a comma, then return a sentiment report, analyzing transcript sentiment.\n \nTRANSCRIPT: {transcript}\n \nTRANSCRIPT SUMMARY: {summary}\n \nSENTIMENT LABEL HERE (\'Positive\',\'Negative\', or \'Neutral\') <comma-seperated> REPORT 
HERE:\n """\n )\n', (609, 1095), False, 'from langchain.prompts import PromptTemplate\n')]
from enum import Enum from typing import Callable, Tuple from langchain.agents.agent import AgentExecutor from langchain.agents.tools import BaseTool, Tool class ToolScope(Enum): GLOBAL = "global" SESSION = "session" SessionGetter = Callable[[], Tuple[str, AgentExecutor]] def tool( name: str, description: str, scope: ToolScope = ToolScope.GLOBAL, ): def decorator(func): func.name = name func.description = description func.is_tool = True func.scope = scope return func return decorator class ToolWrapper: def __init__(self, name: str, description: str, scope: ToolScope, func): self.name = name self.description = description self.scope = scope self.func = func def is_global(self) -> bool: return self.scope == ToolScope.GLOBAL def is_per_session(self) -> bool: return self.scope == ToolScope.SESSION def to_tool( self, get_session: SessionGetter = lambda: [], ) -> BaseTool: func = self.func if self.is_per_session(): func = lambda *args, **kwargs: self.func( *args, **kwargs, get_session=get_session ) return Tool( name=self.name, description=self.description, func=func, ) class BaseToolSet: def tool_wrappers(cls) -> list[ToolWrapper]: methods = [ getattr(cls, m) for m in dir(cls) if hasattr(getattr(cls, m), "is_tool") ] return [ToolWrapper(m.name, m.description, m.scope, m) for m in methods]
[ "langchain.agents.tools.Tool" ]
[((1245, 1306), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': 'self.name', 'description': 'self.description', 'func': 'func'}), '(name=self.name, description=self.description, func=func)\n', (1249, 1306), False, 'from langchain.agents.tools import BaseTool, Tool\n')]
import asyncio from langchain.document_loaders import PyPDFLoader, DirectoryLoader from langchain import PromptTemplate from langchain.embeddings import HuggingFaceEmbeddings from langchain.vectorstores import FAISS from langchain.llms import CTransformers from langchain.chains import RetrievalQA import chainlit as cl DB_FAISS_PATH = 'vectorstores/db_faiss' custom_prompt_template = """Use the following pieces of information to answer the user's question. If you don't know the answer, just say that you don't know, don't try to make up an answer. Context: {context} Question: {question} Only return the helpful answer below and nothing else. Helpful answer: """ def set_custom_prompt(): """ Prompt template for QA retrieval for each vectorstore """ prompt = PromptTemplate(template=custom_prompt_template, input_variables=['context', 'question']) return prompt # Retrieval QA Chain def retrieval_qa_chain(llm, prompt, db): qa_chain = RetrievalQA.from_chain_type(llm=llm, chain_type='stuff', retriever=db.as_retriever(search_kwargs={'k': 2}), return_source_documents=True, chain_type_kwargs={'prompt': prompt} ) return qa_chain # Loading the model def load_llm(): # Load the locally downloaded model here llm = CTransformers( model="TheBloke/Llama-2-7B-Chat-GGML", model_type="llama", max_new_tokens=512, temperature=0.5 ) return llm # QA Model Function async def qa_bot(): embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2", model_kwargs={'device': 'cpu'}) db = FAISS.load_local(DB_FAISS_PATH, embeddings) llm = load_llm() qa_prompt = set_custom_prompt() qa = retrieval_qa_chain(llm, qa_prompt, db) return qa # Output function async def final_result(query): qa_result = await qa_bot() response = await qa_result({'query': query}) return response # chainlit code @cl.on_chat_start async def start(): chain = await qa_bot() # msg = cl.Message(content="Starting the bot...") # await msg.send() # msg.content = "Hi, Welcome to Medical Bot. What is your query?" 
# await msg.update() cl.user_session.set("chain", chain) @cl.on_message async def main(message): chain = cl.user_session.get("chain") cb = cl.AsyncLangchainCallbackHandler( stream_final_answer=True, answer_prefix_tokens=["FINAL", "ANSWER"] ) cb.answer_reached = True res = await chain.acall(message.content, callbacks=[cb]) answer = res["result"] sources = res["source_documents"] if sources: answer += f"\nSources:" + str(sources) else: answer += "\nNo sources found" await cl.Message(content=answer).send() if __name__ == "__main__": asyncio.run(cl.main())
[ "langchain.vectorstores.FAISS.load_local", "langchain.embeddings.HuggingFaceEmbeddings", "langchain.llms.CTransformers", "langchain.PromptTemplate" ]
[((808, 900), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'custom_prompt_template', 'input_variables': "['context', 'question']"}), "(template=custom_prompt_template, input_variables=['context',\n 'question'])\n", (822, 900), False, 'from langchain import PromptTemplate\n'), ((1522, 1635), 'langchain.llms.CTransformers', 'CTransformers', ([], {'model': '"""TheBloke/Llama-2-7B-Chat-GGML"""', 'model_type': '"""llama"""', 'max_new_tokens': '(512)', 'temperature': '(0.5)'}), "(model='TheBloke/Llama-2-7B-Chat-GGML', model_type='llama',\n max_new_tokens=512, temperature=0.5)\n", (1535, 1635), False, 'from langchain.llms import CTransformers\n'), ((1753, 1863), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""sentence-transformers/all-MiniLM-L6-v2"""', 'model_kwargs': "{'device': 'cpu'}"}), "(model_name='sentence-transformers/all-MiniLM-L6-v2',\n model_kwargs={'device': 'cpu'})\n", (1774, 1863), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((1910, 1953), 'langchain.vectorstores.FAISS.load_local', 'FAISS.load_local', (['DB_FAISS_PATH', 'embeddings'], {}), '(DB_FAISS_PATH, embeddings)\n', (1926, 1953), False, 'from langchain.vectorstores import FAISS\n'), ((2504, 2539), 'chainlit.user_session.set', 'cl.user_session.set', (['"""chain"""', 'chain'], {}), "('chain', chain)\n", (2523, 2539), True, 'import chainlit as cl\n'), ((2597, 2625), 'chainlit.user_session.get', 'cl.user_session.get', (['"""chain"""'], {}), "('chain')\n", (2616, 2625), True, 'import chainlit as cl\n'), ((2636, 2740), 'chainlit.AsyncLangchainCallbackHandler', 'cl.AsyncLangchainCallbackHandler', ([], {'stream_final_answer': '(True)', 'answer_prefix_tokens': "['FINAL', 'ANSWER']"}), "(stream_final_answer=True,\n answer_prefix_tokens=['FINAL', 'ANSWER'])\n", (2668, 2740), True, 'import chainlit as cl\n'), ((3124, 3133), 'chainlit.main', 'cl.main', ([], {}), '()\n', (3131, 3133), True, 'import chainlit as cl\n'), ((3043, 
3069), 'chainlit.Message', 'cl.Message', ([], {'content': 'answer'}), '(content=answer)\n', (3053, 3069), True, 'import chainlit as cl\n')]
from langchain.chains import LLMChain from langchain.prompts import PromptTemplate from langchain.chat_models import ChatOpenAI from dotenv import load_dotenv import os from langchain.chains import SimpleSequentialChain # Create a .env file in the root of your project and add your OpenAI API key to it # Load env files load_dotenv() openai_api_key = os.environ.get('openai_api_key') # This is an LLMChain to generate company names given a company description. llm = ChatOpenAI(temperature=0.7, model_name="gpt-3.5-turbo") # Create templates template_name = """You are a company name generator. Based on a company description, it is your job to create a company name. Company description: {company_description} Company name:""" prompt_template_name = PromptTemplate(input_variables=["company_description"], template=template_name) # This is an LLMChain to generate company slogans given a company name and company description. template_slogan = """You are a company slogan generator. Based on a company name, it is your job to create a company slogan. Company name: {company_name} Company slogan:""" prompt_template_slogan = PromptTemplate(input_variables=["company_name"], template=template_slogan) # Create chains name_chain = LLMChain(llm=llm, prompt=prompt_template_name) slogan_chain = LLMChain(llm=llm, prompt=prompt_template_slogan) # This is the overall chain where we run these two chains in sequence. overall_chain = SimpleSequentialChain(chains=[name_chain, slogan_chain], verbose=True) slogan = overall_chain.run("We are a company that sells shoes.")
[ "langchain.prompts.PromptTemplate", "langchain.chains.SimpleSequentialChain", "langchain.chat_models.ChatOpenAI", "langchain.chains.LLMChain" ]
[((321, 334), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (332, 334), False, 'from dotenv import load_dotenv\n'), ((352, 384), 'os.environ.get', 'os.environ.get', (['"""openai_api_key"""'], {}), "('openai_api_key')\n", (366, 384), False, 'import os\n'), ((469, 524), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.7)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0.7, model_name='gpt-3.5-turbo')\n", (479, 524), False, 'from langchain.chat_models import ChatOpenAI\n'), ((757, 836), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['company_description']", 'template': 'template_name'}), "(input_variables=['company_description'], template=template_name)\n", (771, 836), False, 'from langchain.prompts import PromptTemplate\n'), ((1137, 1211), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['company_name']", 'template': 'template_slogan'}), "(input_variables=['company_name'], template=template_slogan)\n", (1151, 1211), False, 'from langchain.prompts import PromptTemplate\n'), ((1242, 1288), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt_template_name'}), '(llm=llm, prompt=prompt_template_name)\n', (1250, 1288), False, 'from langchain.chains import LLMChain\n'), ((1304, 1352), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt_template_slogan'}), '(llm=llm, prompt=prompt_template_slogan)\n', (1312, 1352), False, 'from langchain.chains import LLMChain\n'), ((1441, 1511), 'langchain.chains.SimpleSequentialChain', 'SimpleSequentialChain', ([], {'chains': '[name_chain, slogan_chain]', 'verbose': '(True)'}), '(chains=[name_chain, slogan_chain], verbose=True)\n', (1462, 1511), False, 'from langchain.chains import SimpleSequentialChain\n')]
import os import re from dotenv import load_dotenv from langchain.chat_models import ChatOpenAI from slack_bolt import App from slack_bolt.adapter.socket_mode import SocketModeHandler load_dotenv() # ボットトークンを使ってアプリを初期化します app = App(token=os.environ.get("SLACK_BOT_TOKEN")) @app.event("app_mention") def handle_mention(event, say): thread_ts = event["ts"] message = re.sub("<@.*>", "", event["text"]) llm = ChatOpenAI( model_name=os.environ["OPENAI_API_MODEL"], temperature=os.environ["OPENAI_API_TEMPERATURE"], ) response = llm.predict(message) say(text=response, thread_ts=thread_ts) # ソケットモードハンドラーを使ってアプリを起動します if __name__ == "__main__": SocketModeHandler(app, os.environ["SLACK_APP_TOKEN"]).start()
[ "langchain.chat_models.ChatOpenAI" ]
[((186, 199), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (197, 199), False, 'from dotenv import load_dotenv\n'), ((378, 412), 're.sub', 're.sub', (['"""<@.*>"""', '""""""', "event['text']"], {}), "('<@.*>', '', event['text'])\n", (384, 412), False, 'import re\n'), ((424, 532), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': "os.environ['OPENAI_API_MODEL']", 'temperature': "os.environ['OPENAI_API_TEMPERATURE']"}), "(model_name=os.environ['OPENAI_API_MODEL'], temperature=os.\n environ['OPENAI_API_TEMPERATURE'])\n", (434, 532), False, 'from langchain.chat_models import ChatOpenAI\n'), ((241, 274), 'os.environ.get', 'os.environ.get', (['"""SLACK_BOT_TOKEN"""'], {}), "('SLACK_BOT_TOKEN')\n", (255, 274), False, 'import os\n'), ((693, 746), 'slack_bolt.adapter.socket_mode.SocketModeHandler', 'SocketModeHandler', (['app', "os.environ['SLACK_APP_TOKEN']"], {}), "(app, os.environ['SLACK_APP_TOKEN'])\n", (710, 746), False, 'from slack_bolt.adapter.socket_mode import SocketModeHandler\n')]
""" This module contains the function to classify the user query. """ import json from langchain.prompts import ChatPromptTemplate from langchain.chains import create_extraction_chain from langchain.chat_models import ChatOpenAI from langchain.document_loaders import TextLoader from langchain.document_loaders import DirectoryLoader from ..config import Config config = Config() config.load() OPENAI_API_KEY = config.openai_key async def classify_user_query(query, context, document_types): """Classify the user query based on the context and document types.""" llm = ChatOpenAI(temperature=0, model=config.model) prompt_classify = ChatPromptTemplate.from_template( """You are a classifier. You store user memories, thoughts and feelings. Determine if you need to use them to answer this query : {query}""" ) json_structure = [ { "name": "classifier", "description": "Classification", "parameters": { "type": "object", "properties": { "UserQueryClassifier": { "type": "bool", "description": "The classification of documents " "in groups such as legal, medical, etc.", } }, "required": ["UserQueryClassifier"], }, } ] chain_filter = prompt_classify | llm.bind( function_call={"name": "classifier"}, functions=json_structure ) classifier_output = await chain_filter.ainvoke( {"query": query, "context": context, "document_types": document_types} ) arguments_str = classifier_output.additional_kwargs["function_call"]["arguments"] print("This is the arguments string", arguments_str) arguments_dict = json.loads(arguments_str) classfier_value = arguments_dict.get("UserQueryClassifier", None) print("This is the classifier value", classfier_value) return classfier_value
[ "langchain.prompts.ChatPromptTemplate.from_template", "langchain.chat_models.ChatOpenAI" ]
[((582, 627), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model': 'config.model'}), '(temperature=0, model=config.model)\n', (592, 627), False, 'from langchain.chat_models import ChatOpenAI\n'), ((650, 853), 'langchain.prompts.ChatPromptTemplate.from_template', 'ChatPromptTemplate.from_template', (['"""You are a classifier. \n You store user memories, thoughts and feelings. \n Determine if you need to use them to answer this query : {query}"""'], {}), '(\n """You are a classifier. \n You store user memories, thoughts and feelings. \n Determine if you need to use them to answer this query : {query}"""\n )\n', (682, 853), False, 'from langchain.prompts import ChatPromptTemplate\n'), ((1854, 1879), 'json.loads', 'json.loads', (['arguments_str'], {}), '(arguments_str)\n', (1864, 1879), False, 'import json\n')]
from langchain.chat_models import ChatOpenAI from langchain.prompts import ChatPromptTemplate from langchain.schema import StrOutputParser from langchain.schema.runnable import Runnable from langchain.schema.runnable.config import RunnableConfig import chainlit as cl @cl.on_chat_start async def on_chat_start(): model = ChatOpenAI(openai_api_base="http://localhost:8888/v1",streaming=True) prompt = ChatPromptTemplate.from_messages( [ ( "system", "You're a very knowledgeable historian who provides accurate and eloquent answers to historical questions.", ), ("human", "{question}"), ] ) runnable = prompt | model | StrOutputParser() cl.user_session.set("runnable", runnable) @cl.on_message async def on_message(message: cl.Message): runnable = cl.user_session.get("runnable") # type: Runnable msg = cl.Message(content="") for chunk in await cl.make_async(runnable.stream)( {"question": message.content}, config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]), ): await msg.stream_token(chunk) await msg.send()
[ "langchain.prompts.ChatPromptTemplate.from_messages", "langchain.schema.StrOutputParser", "langchain.chat_models.ChatOpenAI" ]
[((328, 398), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_base': '"""http://localhost:8888/v1"""', 'streaming': '(True)'}), "(openai_api_base='http://localhost:8888/v1', streaming=True)\n", (338, 398), False, 'from langchain.chat_models import ChatOpenAI\n'), ((411, 600), 'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['[(\'system\',\n "You\'re a very knowledgeable historian who provides accurate and eloquent answers to historical questions."\n ), (\'human\', \'{question}\')]'], {}), '([(\'system\',\n "You\'re a very knowledgeable historian who provides accurate and eloquent answers to historical questions."\n ), (\'human\', \'{question}\')])\n', (443, 600), False, 'from langchain.prompts import ChatPromptTemplate\n'), ((742, 783), 'chainlit.user_session.set', 'cl.user_session.set', (['"""runnable"""', 'runnable'], {}), "('runnable', runnable)\n", (761, 783), True, 'import chainlit as cl\n'), ((859, 890), 'chainlit.user_session.get', 'cl.user_session.get', (['"""runnable"""'], {}), "('runnable')\n", (878, 890), True, 'import chainlit as cl\n'), ((920, 942), 'chainlit.Message', 'cl.Message', ([], {'content': '""""""'}), "(content='')\n", (930, 942), True, 'import chainlit as cl\n'), ((720, 737), 'langchain.schema.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (735, 737), False, 'from langchain.schema import StrOutputParser\n'), ((967, 997), 'chainlit.make_async', 'cl.make_async', (['runnable.stream'], {}), '(runnable.stream)\n', (980, 997), True, 'import chainlit as cl\n'), ((1079, 1108), 'chainlit.LangchainCallbackHandler', 'cl.LangchainCallbackHandler', ([], {}), '()\n', (1106, 1108), True, 'import chainlit as cl\n')]
import os import streamlit as st from PyPDF2 import PdfReader, PdfWriter from langchain.text_splitter import CharacterTextSplitter from langchain.embeddings.openai import OpenAIEmbeddings from langchain.vectorstores import FAISS from langchain.chains.question_answering import load_qa_chain from langchain.llms import OpenAI from langchain.callbacks import get_openai_callback def ChatPDF(text): # st.write(text) #split into chunks text_splitter = CharacterTextSplitter( separator="\n", chunk_size = 1000, chunk_overlap = 200, length_function=len ) chunks = text_splitter.split_text(text) # st.write(chunks) # creating embeddings OPENAI_API_KEY = st.text_input("OPENAI API KEY", type = "password") if OPENAI_API_KEY: embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY) # st.write("Embedding Created") # st.write(embeddings) knowledge_base = FAISS.from_texts(chunks, embeddings) st.write("Knowledge Base created ") #show user input def ask_question(i=0): user_question = st.text_input("Ask a question about your PDF?",key = i) if user_question: docs = knowledge_base.similarity_search(user_question) # st.write(docs) llm = OpenAI(openai_api_key=OPENAI_API_KEY) chain = load_qa_chain(llm, chain_type="stuff") with get_openai_callback() as cb: response = chain.run(input_documents=docs, question=user_question) print(cb) st.write(response) ask_question(i+1) ask_question() def main(): st.set_page_config(page_title="Ask ur PDF", page_icon="📄") hide_st_style = """ <style> #mainMenue {visibility: hidden;} footer {visibility: hidden;} #header {visibility: hidden;} </style> """ st.markdown(hide_st_style, unsafe_allow_html=True) # st.write(st.set_page_config) st.header("Ask your PDF 🤔💭") #uploading file pdf = st.file_uploader("Upload your PDF ", type="pdf") # extract the text if pdf is not None: option = st.selectbox("What you want to do with PDF📜", [ "Meta Data📂", "Extract Raw Text📄", "Extract Links🔗", "Extract Images🖼️", "Make PDF password protected🔐", "PDF Annotation📝", "ChatPDF💬" ]) 
pdf_reader = PdfReader(pdf) text = "" for page in pdf_reader.pages: text += page.extract_text() if option == "Meta Data📂": st.write(pdf_reader.metadata) elif option == "Make PDF password protected🔐": pswd = st.text_input("Enter yourpass word", type="password") if pswd: with st.spinner("Encrypting..."): pdf_writer = PdfWriter() for page_num in range(len(pdf_reader.pages)): pdf_writer.add_page(pdf_reader.pages[page_num]) pdf_writer.encrypt(pswd) with open(f"{pdf.name.split('.')[0]}_encrypted.pdf", "wb") as f: pdf_writer.write(f) st.success("Encryption Successful!") st.download_button( label="Download Encrypted PDF", data=open(f"{pdf.name.split('.')[0]}_encrypted.pdf", "rb").read(), file_name=f"{pdf.name.split('.')[0]}_encrypted.pdf", mime="application/octet-stream", ) try: os.remove(f"{pdf.name.split('.')[0]}_encrypted.pdf") except: pass elif option == "Extract Raw Text📄": st.write(text) elif option == "Extract Links🔗": for page in pdf_reader.pages: if "/Annots" in page: for annot in page["/Annots"]: subtype = annot.get_object()["/Subtype"] if subtype == "/Link": try: st.write(annot.get_object()["/A"]["/URI"]) except: pass elif option == "Extract Images🖼️": for page in pdf_reader.pages: try: for img in page.images: st.write(img.name) st.image(img.data) except: pass elif option == "PDF Annotation📝": for page in pdf_reader.pages: if "/Annots" in page: for annot in page["/Annots"]: obj = annot.get_object() st.write(obj) st.write("***********") annotation = {"subtype": obj["/Subtype"], "location": obj["/Rect"]} st.write(annotation) elif option == "ChatPDF💬": ChatPDF(text) if __name__ == "__main__": main()
[ "langchain.text_splitter.CharacterTextSplitter", "langchain.vectorstores.FAISS.from_texts", "langchain.llms.OpenAI", "langchain.callbacks.get_openai_callback", "langchain.chains.question_answering.load_qa_chain", "langchain.embeddings.openai.OpenAIEmbeddings" ]
[((481, 579), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '"""\n"""', 'chunk_size': '(1000)', 'chunk_overlap': '(200)', 'length_function': 'len'}), "(separator='\\n', chunk_size=1000, chunk_overlap=200,\n length_function=len)\n", (502, 579), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((745, 793), 'streamlit.text_input', 'st.text_input', (['"""OPENAI API KEY"""'], {'type': '"""password"""'}), "('OPENAI API KEY', type='password')\n", (758, 793), True, 'import streamlit as st\n'), ((1783, 1841), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Ask ur PDF"""', 'page_icon': '"""📄"""'}), "(page_title='Ask ur PDF', page_icon='📄')\n", (1801, 1841), True, 'import streamlit as st\n'), ((2081, 2131), 'streamlit.markdown', 'st.markdown', (['hide_st_style'], {'unsafe_allow_html': '(True)'}), '(hide_st_style, unsafe_allow_html=True)\n', (2092, 2131), True, 'import streamlit as st\n'), ((2175, 2203), 'streamlit.header', 'st.header', (['"""Ask your PDF 🤔💭"""'], {}), "('Ask your PDF 🤔💭')\n", (2184, 2203), True, 'import streamlit as st\n'), ((2242, 2290), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload your PDF """'], {'type': '"""pdf"""'}), "('Upload your PDF ', type='pdf')\n", (2258, 2290), True, 'import streamlit as st\n'), ((842, 889), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'OPENAI_API_KEY'}), '(openai_api_key=OPENAI_API_KEY)\n', (858, 889), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((989, 1025), 'langchain.vectorstores.FAISS.from_texts', 'FAISS.from_texts', (['chunks', 'embeddings'], {}), '(chunks, embeddings)\n', (1005, 1025), False, 'from langchain.vectorstores import FAISS\n'), ((1035, 1070), 'streamlit.write', 'st.write', (['"""Knowledge Base created """'], {}), "('Knowledge Base created ')\n", (1043, 1070), True, 'import streamlit as st\n'), ((2360, 2551), 'streamlit.selectbox', 
'st.selectbox', (['"""What you want to do with PDF📜"""', "['Meta Data📂', 'Extract Raw Text📄', 'Extract Links🔗', 'Extract Images🖼️',\n 'Make PDF password protected🔐', 'PDF Annotation📝', 'ChatPDF💬']"], {}), "('What you want to do with PDF📜', ['Meta Data📂',\n 'Extract Raw Text📄', 'Extract Links🔗', 'Extract Images🖼️',\n 'Make PDF password protected🔐', 'PDF Annotation📝', 'ChatPDF💬'])\n", (2372, 2551), True, 'import streamlit as st\n'), ((2672, 2686), 'PyPDF2.PdfReader', 'PdfReader', (['pdf'], {}), '(pdf)\n', (2681, 2686), False, 'from PyPDF2 import PdfReader, PdfWriter\n'), ((1160, 1214), 'streamlit.text_input', 'st.text_input', (['"""Ask a question about your PDF?"""'], {'key': 'i'}), "('Ask a question about your PDF?', key=i)\n", (1173, 1214), True, 'import streamlit as st\n'), ((2835, 2864), 'streamlit.write', 'st.write', (['pdf_reader.metadata'], {}), '(pdf_reader.metadata)\n', (2843, 2864), True, 'import streamlit as st\n'), ((1378, 1415), 'langchain.llms.OpenAI', 'OpenAI', ([], {'openai_api_key': 'OPENAI_API_KEY'}), '(openai_api_key=OPENAI_API_KEY)\n', (1384, 1415), False, 'from langchain.llms import OpenAI\n'), ((1441, 1479), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['llm'], {'chain_type': '"""stuff"""'}), "(llm, chain_type='stuff')\n", (1454, 1479), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((1667, 1685), 'streamlit.write', 'st.write', (['response'], {}), '(response)\n', (1675, 1685), True, 'import streamlit as st\n'), ((2941, 2994), 'streamlit.text_input', 'st.text_input', (['"""Enter yourpass word"""'], {'type': '"""password"""'}), "('Enter yourpass word', type='password')\n", (2954, 2994), True, 'import streamlit as st\n'), ((1502, 1523), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (1521, 1523), False, 'from langchain.callbacks import get_openai_callback\n'), ((4062, 4076), 'streamlit.write', 'st.write', (['text'], {}), '(text)\n', (4070, 4076), True, 
'import streamlit as st\n'), ((3039, 3066), 'streamlit.spinner', 'st.spinner', (['"""Encrypting..."""'], {}), "('Encrypting...')\n", (3049, 3066), True, 'import streamlit as st\n'), ((3102, 3113), 'PyPDF2.PdfWriter', 'PdfWriter', ([], {}), '()\n', (3111, 3113), False, 'from PyPDF2 import PdfReader, PdfWriter\n'), ((3480, 3516), 'streamlit.success', 'st.success', (['"""Encryption Successful!"""'], {}), "('Encryption Successful!')\n", (3490, 3516), True, 'import streamlit as st\n'), ((4697, 4715), 'streamlit.write', 'st.write', (['img.name'], {}), '(img.name)\n', (4705, 4715), True, 'import streamlit as st\n'), ((4741, 4759), 'streamlit.image', 'st.image', (['img.data'], {}), '(img.data)\n', (4749, 4759), True, 'import streamlit as st\n'), ((5041, 5054), 'streamlit.write', 'st.write', (['obj'], {}), '(obj)\n', (5049, 5054), True, 'import streamlit as st\n'), ((5080, 5103), 'streamlit.write', 'st.write', (['"""***********"""'], {}), "('***********')\n", (5088, 5103), True, 'import streamlit as st\n'), ((5222, 5242), 'streamlit.write', 'st.write', (['annotation'], {}), '(annotation)\n', (5230, 5242), True, 'import streamlit as st\n')]
"""Example of observing LLM calls made by via callable OpenAI LLM.""" from langchain.llms import OpenAI from langchain_prefect.plugins import RecordLLMCalls llm = OpenAI(temperature=0.9) with RecordLLMCalls(): llm("What would be a good name for a company that makes colorful socks?")
[ "langchain.llms.OpenAI", "langchain_prefect.plugins.RecordLLMCalls" ]
[((166, 189), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.9)'}), '(temperature=0.9)\n', (172, 189), False, 'from langchain.llms import OpenAI\n'), ((196, 212), 'langchain_prefect.plugins.RecordLLMCalls', 'RecordLLMCalls', ([], {}), '()\n', (210, 212), False, 'from langchain_prefect.plugins import RecordLLMCalls\n')]
from langchain.agents import load_tools from langchain.agents import initialize_agent, Tool from langchain.agents import AgentType from langchain.utilities import SerpAPIWrapper from langchain_app.models.vicuna_request_llm import VicunaLLM # First, let's load the language model we're going to use to control the agent. llm = VicunaLLM() params = { "engine": "google", "gl": "us", "hl": "en", } search = SerpAPIWrapper(params=params) # Next, let's load some tools to use. Note that the `llm-math` tool uses an LLM, so we need to pass that in. tools = load_tools(["python_repl", "multi_line_human"], llm=llm) tools.append( Tool( name="Search", func=search.run, description="useful for when you need to ask with search", ) ) # Finally, let's initialize an agent with the tools, the language model, and the type of agent we want to use. agent = initialize_agent( tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True ) # Now let's test it out! agent.run( """ Oh, wait, before your start your next question, we should go over one more tool that is available. The action MultiLineHuman can be used to ask help to a human. Action: MultiLineHuman Action Input: "Human, please fix this error for me?" Observation: # Gladly so, you declared a variable with \_, which is an illegal character. # Here's the right version: empty_list = [] Thought: The Human helped me, I should use his observation as input to the shell Action: Python REPL Action Input: empty_list = [] Observation: Thought: It worked this time. I have concluded the task. Final Answer: The following script worked: empty_list = [] OK, now you begin Question: Ask from the help of a human about a topic. You should then help him. """ )
[ "langchain.agents.initialize_agent", "langchain.utilities.SerpAPIWrapper", "langchain.agents.load_tools", "langchain_app.models.vicuna_request_llm.VicunaLLM", "langchain.agents.Tool" ]
[((328, 339), 'langchain_app.models.vicuna_request_llm.VicunaLLM', 'VicunaLLM', ([], {}), '()\n', (337, 339), False, 'from langchain_app.models.vicuna_request_llm import VicunaLLM\n'), ((419, 448), 'langchain.utilities.SerpAPIWrapper', 'SerpAPIWrapper', ([], {'params': 'params'}), '(params=params)\n', (433, 448), False, 'from langchain.utilities import SerpAPIWrapper\n'), ((566, 622), 'langchain.agents.load_tools', 'load_tools', (["['python_repl', 'multi_line_human']"], {'llm': 'llm'}), "(['python_repl', 'multi_line_human'], llm=llm)\n", (576, 622), False, 'from langchain.agents import load_tools\n'), ((891, 982), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': 'AgentType.ZERO_SHOT_REACT_DESCRIPTION', 'verbose': '(True)'}), '(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n verbose=True)\n', (907, 982), False, 'from langchain.agents import initialize_agent, Tool\n'), ((642, 742), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Search"""', 'func': 'search.run', 'description': '"""useful for when you need to ask with search"""'}), "(name='Search', func=search.run, description=\n 'useful for when you need to ask with search')\n", (646, 742), False, 'from langchain.agents import initialize_agent, Tool\n')]
# /app/src/tools/setup.py import logging from langchain.pydantic_v1 import BaseModel, Field from langchain.tools import BaseTool from langchain_community.tools import DuckDuckGoSearchResults from src.tools.doc_search import DocumentSearch logger = logging.getLogger(__name__) class SearchWebInput(BaseModel): query: str = Field(description="The search query") class SearchTechDocsInput(BaseModel): query: str = Field(description="The search query") collection: str = Field(default="techdocs", description="The document collection to search in") class SearchWebTool(BaseTool): name = "search_web" description = "Conducts DuckDuckGo searches." args_schema = SearchWebInput return_direct = True def _run(self, query: str, **kwargs) -> str: search = DuckDuckGoSearchResults() return search.run(query) class SearchTechDocsTool(BaseTool): name = "search_techdocs" description = "This tool enables the querying of a specialized vector store named ‘TechDocs,’ a repository where users archive valuable technical documentation they have encountered. It is particularly beneficial when engaging with technical subjects or when involved in coding activities. Utilize this search tool to scrutinize the vector store for pertinent context when addressing technical inquiries or tasks. If a term from the user input is unfamiliar but appears to be technical in nature, it is imperative to consult ‘TechDocs’ to ascertain whether relevant information or context is available therein. For your awareness, the information provided is sourced from ‘TechDocs,’ and we will refer to this source for any related queries." args_schema = SearchTechDocsInput return_direct = True def _run(self, query: str, collection: str = "techdocs", **kwargs) -> str: search = DocumentSearch(query, collection) results = search.search_documents() return results class ToolSetup: """ A class dedicated to the setup and initialization of tools used by the agent. 
""" @classmethod def setup_tools(cls) -> list: """ Initializes and returns a list of tools for the agent. Returns: - list: A list of initialized tools for agent's use. """ return [SearchWebTool(), SearchTechDocsTool()]
[ "langchain.pydantic_v1.Field", "langchain_community.tools.DuckDuckGoSearchResults" ]
[((251, 278), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (268, 278), False, 'import logging\n'), ((331, 368), 'langchain.pydantic_v1.Field', 'Field', ([], {'description': '"""The search query"""'}), "(description='The search query')\n", (336, 368), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((426, 463), 'langchain.pydantic_v1.Field', 'Field', ([], {'description': '"""The search query"""'}), "(description='The search query')\n", (431, 463), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((486, 563), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': '"""techdocs"""', 'description': '"""The document collection to search in"""'}), "(default='techdocs', description='The document collection to search in')\n", (491, 563), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((796, 821), 'langchain_community.tools.DuckDuckGoSearchResults', 'DuckDuckGoSearchResults', ([], {}), '()\n', (819, 821), False, 'from langchain_community.tools import DuckDuckGoSearchResults\n'), ((1829, 1862), 'src.tools.doc_search.DocumentSearch', 'DocumentSearch', (['query', 'collection'], {}), '(query, collection)\n', (1843, 1862), False, 'from src.tools.doc_search import DocumentSearch\n')]
from llama_index.callbacks import CallbackManager, LlamaDebugHandler, CBEventType from llama_index import ListIndex, ServiceContext, SimpleDirectoryReader, VectorStoreIndex ''' Title of the page: A simple Python implementation of the ReAct pattern for LLMs Name of the website: LlamaIndex (GPT Index) is a data framework for your LLM application. URL: https://github.com/jerryjliu/llama_index ''' docs = SimpleDirectoryReader("../data/paul_graham/").load_data() from llama_index import ServiceContext, LLMPredictor, TreeIndex from langchain.chat_models import ChatOpenAI llm_predictor = LLMPredictor(llm=ChatOpenAI(model_name='gpt-3.5-turbo', temperature=0)) llama_debug = LlamaDebugHandler(print_trace_on_end=True) callback_manager = CallbackManager([llama_debug]) service_context = ServiceContext.from_defaults(callback_manager=callback_manager, llm_predictor=llm_predictor) index = VectorStoreIndex.from_documents(docs, service_context=service_context) query_engine = index.as_query_engine() response = query_engine.query("What did the author do growing up?") # Print info on the LLM calls during the list index query print(llama_debug.get_event_time_info(CBEventType.LLM)) # Print info on llm inputs/outputs - returns start/end events for each LLM call event_pairs = llama_debug.get_llm_inputs_outputs() print(event_pairs[0][0]) print(event_pairs[0][1].payload.keys()) print(event_pairs[0][1].payload['response']) # Get info on any event type event_pairs = llama_debug.get_event_pairs(CBEventType.CHUNKING) print(event_pairs[0][0].payload.keys()) # get first chunking start event print(event_pairs[0][1].payload.keys()) # get first chunking end event # Clear the currently cached events llama_debug.flush_event_logs()
[ "langchain.chat_models.ChatOpenAI" ]
[((676, 718), 'llama_index.callbacks.LlamaDebugHandler', 'LlamaDebugHandler', ([], {'print_trace_on_end': '(True)'}), '(print_trace_on_end=True)\n', (693, 718), False, 'from llama_index.callbacks import CallbackManager, LlamaDebugHandler, CBEventType\n'), ((738, 768), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[llama_debug]'], {}), '([llama_debug])\n', (753, 768), False, 'from llama_index.callbacks import CallbackManager, LlamaDebugHandler, CBEventType\n'), ((787, 883), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'callback_manager': 'callback_manager', 'llm_predictor': 'llm_predictor'}), '(callback_manager=callback_manager,\n llm_predictor=llm_predictor)\n', (815, 883), False, 'from llama_index import ServiceContext, LLMPredictor, TreeIndex\n'), ((889, 959), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {'service_context': 'service_context'}), '(docs, service_context=service_context)\n', (920, 959), False, 'from llama_index import ListIndex, ServiceContext, SimpleDirectoryReader, VectorStoreIndex\n'), ((405, 450), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""../data/paul_graham/"""'], {}), "('../data/paul_graham/')\n", (426, 450), False, 'from llama_index import ListIndex, ServiceContext, SimpleDirectoryReader, VectorStoreIndex\n'), ((606, 659), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model_name='gpt-3.5-turbo', temperature=0)\n", (616, 659), False, 'from langchain.chat_models import ChatOpenAI\n')]
from typing import Any, Callable from pandas import DataFrame from exact_rag.config import EmbeddingType, Embeddings, DatabaseType, Databases from langchain_openai import OpenAIEmbeddings from langchain_community.embeddings import OllamaEmbeddings from langchain.vectorstores.chroma import Chroma from langchain.vectorstores.elasticsearch import ElasticsearchStore from langchain.indexes import SQLRecordManager from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain_community.document_loaders import DataFrameLoader from langchain.indexes import index from langchain.chains import RetrievalQA from langchain_openai import ChatOpenAI from langchain_community.llms.ollama import Ollama class Caller: def __init__( self, callable: Callable[..., Any], arg_swap: dict[str, Any] | None = None, accept_only: list[str] | None = None, ): self._callable = callable self._arg_swap = arg_swap self._accept_only = accept_only def __call__(self, **args): if self._accept_only: args = { key: value for (key, value) in args.items() if key in self._accept_only } if self._arg_swap: args = { self._arg_swap.get(arg, arg): value for (arg, value) in args.items() } return self._callable(**args) embeddings = { EmbeddingType.openai: Caller(OpenAIEmbeddings, accept_only=["api_key"]), EmbeddingType.ollama: Caller(OllamaEmbeddings, accept_only=["model"]), } dbs = { DatabaseType.chroma: Caller( Chroma, {"embedding": "embedding_function"}, accept_only=["embedding", "persist_directory", "collection_name"], ), DatabaseType.elastic: Caller( ElasticsearchStore, {"collection_name": "index_name", "url": "es_url"}, accept_only=[ "embedding", "url", "collection_name", "distance_strategy", "strategy", ], ), } chats = { EmbeddingType.openai: Caller( ChatOpenAI, { "api_key": "openai_api_key", "chat_model_name": "model_name", "chat_temperature": "temperature", }, accept_only=["chat_model_name", "chat_temperature", "api_key"], ), EmbeddingType.ollama: Caller( Ollama, accept_only=["model"], ), } 
class DataEmbedding: def __init__(self, embedding_model: Embeddings, database_model: Databases): embedding_type = embedding_model.type self._embedding = embeddings[embedding_type](**embedding_model.model_dump()) print("Embedding initialized.") database_type = database_model.type self._vectorstore = dbs[database_type]( embedding=self._embedding, **database_model.model_dump(), ) print("Vectorstore initialized.") self._record_manager = SQLRecordManager( database_model.sql_namespace, db_url=f"sqlite:///{database_model.sql_url}", ) print("Record manager initialized.") self._record_manager.create_schema() print(" schema created.") self._splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder( chunk_size=database_model.splitter_chunk_size, chunk_overlap=database_model.splitter_chunk_overlap, ) print("Splitter initialized.") self._qa = RetrievalQA.from_chain_type( llm=chats[embedding_type](**embedding_model.model_dump()), chain_type="stuff", retriever=self._vectorstore.as_retriever( search_type=embedding_model.search_type, search_kwargs={ "k": embedding_model.search_k, "fetch_k": embedding_model.search_fetch_k, }, ), ) print("Chat initialized.") def load(self, text: str): id_key = "hash" content_name = "text" dataframe = DataFrame().from_dict([{id_key: hash(text), content_name: text}]) loader = DataFrameLoader(dataframe, page_content_column=content_name) data = loader.load() documents = self._splitter.split_documents(data) index( documents, self._record_manager, self._vectorstore, cleanup="incremental", source_id_key=id_key, ) def chat(self, query: str): self.load(query) return self._qa.invoke({"query": query})
[ "langchain_community.document_loaders.DataFrameLoader", "langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder", "langchain.indexes.SQLRecordManager", "langchain.indexes.index" ]
[((2955, 3052), 'langchain.indexes.SQLRecordManager', 'SQLRecordManager', (['database_model.sql_namespace'], {'db_url': 'f"""sqlite:///{database_model.sql_url}"""'}), "(database_model.sql_namespace, db_url=\n f'sqlite:///{database_model.sql_url}')\n", (2971, 3052), False, 'from langchain.indexes import SQLRecordManager\n'), ((3237, 3399), 'langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder', 'RecursiveCharacterTextSplitter.from_tiktoken_encoder', ([], {'chunk_size': 'database_model.splitter_chunk_size', 'chunk_overlap': 'database_model.splitter_chunk_overlap'}), '(chunk_size=\n database_model.splitter_chunk_size, chunk_overlap=database_model.\n splitter_chunk_overlap)\n', (3289, 3399), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((4141, 4201), 'langchain_community.document_loaders.DataFrameLoader', 'DataFrameLoader', (['dataframe'], {'page_content_column': 'content_name'}), '(dataframe, page_content_column=content_name)\n', (4156, 4201), False, 'from langchain_community.document_loaders import DataFrameLoader\n'), ((4296, 4403), 'langchain.indexes.index', 'index', (['documents', 'self._record_manager', 'self._vectorstore'], {'cleanup': '"""incremental"""', 'source_id_key': 'id_key'}), "(documents, self._record_manager, self._vectorstore, cleanup=\n 'incremental', source_id_key=id_key)\n", (4301, 4403), False, 'from langchain.indexes import index\n'), ((4058, 4069), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (4067, 4069), False, 'from pandas import DataFrame\n')]
import base64 from enum import Enum import json import time import logging from pywebagent.env.browser import BrowserEnv from langchain.schema import HumanMessage, SystemMessage from langchain.chat_models import ChatOpenAI logger = logging.getLogger(__name__) TASK_STATUS = Enum("TASK_STATUS", "IN_PROGRESS SUCCESS FAILED") class Task: def __init__(self, task, args) -> None: self.task = task self.args = args def get_llm(): return ChatOpenAI( model_name="gpt-4-vision-preview", temperature=1, request_timeout=120, max_tokens=2000, ) def generate_user_message(task, observation): log_history = '\n'.join(observation.env_state.log_history if observation.env_state.log_history else []) marked_elements_tags = ', '.join([f"({str(i)}) - <{elem['tag'].lower()}>" for i, elem in observation.marked_elements.items()]) text_prompt = f""" Execution error: {observation.error_message} URL: {observation.url} Marked elements tags: {marked_elements_tags} Task: {task.task} Log of last actions: {log_history} Task Arguments: {json.dumps(task.args, indent=4)} """ screenshot_binary = observation.screenshot base64_image = base64.b64encode(screenshot_binary).decode('utf-8') image_content = { "type": "image_url", "image_url": { "url": f"data:image/jpeg;base64,{base64_image}", "detail": "high", # low, high or auto }, } text_content = {"type": "text", "text": text_prompt} return HumanMessage(content=[text_content, image_content]) def generate_system_message(): system_prompt = """ You are an AI agent that controls a webpage using python code, in order to achieve a task. You are provided a screenshot of the webpage at each timeframe, and you decide on the next python line to execute. You can use the following functions: - actions.click(element_id, log_message) # click on an element - actions.input_text(element_id, text, clear_before_input, log_message) # Use clear_before_input=True to replace the text instead of appending to it. Never use this method on a combobox. 
- actions.upload_files(element_id, files: list, log_message) # use this instead of click if clicking is expected to open a file picker - actions.scroll(direction, log_message) # scroll the page up or down. direction is either 'up' or 'down'. - actions.combobox_select(element_id, option, log_message) # select an option from a combobox. - actions.finish(did_succeed, output: dict, reason) # the task is complete with did_succeed=True or False, and a text reason. output is optional dictionary of output values if the task succeeded. - actions.act(url: str, task: str, log_message, **kwargs) # run another agent on a different webpage. The sub-agent will run until it finishes and will output a result which you can use later. Useful for getting auth details from email for example. # task argument should be described in natural language. kwargs are additional arguments the sub-agent needs to complete the task. YOU MUST PROVIDE ALL NEEDED ARGUMENTS, OTHERWISE THE SUB-AGENT WILL FAIL. element_id is always an integer, and is visible as a green label with white number around the TOP-LEFT CORNER OF EACH ELEMENT. Make sure to examine all green highlighted elements before choosing one to interact with. log_message is a short one sentence explanation of what the action does. Do not use keyword arguments, all arguments are positional. IMPORTANT: ONLY ONE WEBPAGE FUNCTION CALL IS ALLOWED, EXCEPT FOR FORMS WHERE MULTIPLE CALLS ARE ALLOWED TO FILL MULTIPLE FIELDS! NOTHING IS ALLOWED AFTER THE "```" ENDING THE CODE BLOCK IMPORTANT: LOOK FOR CUES IN THE SCREENSHOTS TO SEE WHAT PARTS OF THE TASK ARE COMPLETED AND WHAT PARTS ARE NOT. FOR EXAMPLE, IF YOU ARE ASKED TO BUY A PRODUCT, LOOK FOR CUES THAT THE PRODUCT IS IN THE CART. Response format: Reasoning: Explanation for the next action, particularly focusing on interpreting the attached screenshot image. Code: ```python # variable definitions and non-webpage function calls are allowed ... # a single webpage function call. 
actions.func_name(args..) ``` """ return SystemMessage(content=system_prompt) def extract_code(text): """ Extracts all text in a string following the pattern "'\nCode:\n". """ pattern = "\nCode:\n```python\n" start_index = text.find(pattern) if start_index == -1: raise Exception("Code not found") # Extract the text following the pattern, without the trailing "```" extracted_text = text[start_index + len(pattern):-3] return extracted_text def calcualte_next_action(task, observation): llm = get_llm() system_message = generate_system_message() user_message = generate_user_message(task, observation) try: ai_message = llm([system_message, user_message]) except: # This sometimes solves the RPM limit issue logger.warning("Failed to get response from OpenAI, trying again in 30 seconds") time.sleep(30) ai_message = llm([system_message, user_message]) logger.info(f"AI message: {ai_message.content}") code_to_execute = extract_code(ai_message.content) return code_to_execute def get_task_status(observation): if observation.env_state.has_successfully_completed: return TASK_STATUS.SUCCESS elif observation.env_state.has_failed: return TASK_STATUS.FAILED else: return TASK_STATUS.IN_PROGRESS def act(url, task, max_actions=40, **kwargs): task = Task(task=task, args=kwargs) browser = BrowserEnv(headless=False) observation = browser.reset(url) for i in range(max_actions): action = calcualte_next_action(task, observation) observation = browser.step(action, observation.marked_elements) task_status = get_task_status(observation) if task_status in [TASK_STATUS.SUCCESS, TASK_STATUS.FAILED]: return task_status, observation.env_state.output logger.warning(f"Reached {i} actions without completing the task.") return TASK_STATUS.FAILED, observation.env_state.output
[ "langchain.schema.SystemMessage", "langchain.schema.HumanMessage", "langchain.chat_models.ChatOpenAI" ]
[((233, 260), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (250, 260), False, 'import logging\n'), ((277, 326), 'enum.Enum', 'Enum', (['"""TASK_STATUS"""', '"""IN_PROGRESS SUCCESS FAILED"""'], {}), "('TASK_STATUS', 'IN_PROGRESS SUCCESS FAILED')\n", (281, 326), False, 'from enum import Enum\n'), ((461, 563), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-4-vision-preview"""', 'temperature': '(1)', 'request_timeout': '(120)', 'max_tokens': '(2000)'}), "(model_name='gpt-4-vision-preview', temperature=1,\n request_timeout=120, max_tokens=2000)\n", (471, 563), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1653, 1704), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '[text_content, image_content]'}), '(content=[text_content, image_content])\n', (1665, 1704), False, 'from langchain.schema import HumanMessage, SystemMessage\n'), ((4455, 4491), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'system_prompt'}), '(content=system_prompt)\n', (4468, 4491), False, 'from langchain.schema import HumanMessage, SystemMessage\n'), ((5890, 5916), 'pywebagent.env.browser.BrowserEnv', 'BrowserEnv', ([], {'headless': '(False)'}), '(headless=False)\n', (5900, 5916), False, 'from pywebagent.env.browser import BrowserEnv\n'), ((1224, 1255), 'json.dumps', 'json.dumps', (['task.args'], {'indent': '(4)'}), '(task.args, indent=4)\n', (1234, 1255), False, 'import json\n'), ((1322, 1357), 'base64.b64encode', 'base64.b64encode', (['screenshot_binary'], {}), '(screenshot_binary)\n', (1338, 1357), False, 'import base64\n'), ((5312, 5326), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (5322, 5326), False, 'import time\n')]
"""Load markdown, html, text from files, clean up, split, ingest into Pinecone.""" import pinecone import tiktoken from langchain.document_loaders import ReadTheDocsLoader from langchain.embeddings import OpenAIEmbeddings from langchain.text_splitter import NLTKTextSplitter from langchain.vectorstores.pinecone import Pinecone def ingest_docs(): """Get documents from web pages.""" loader = ReadTheDocsLoader("hasura.io/docs/latest/") raw_documents = loader.load() text_splitter = NLTKTextSplitter.from_tiktoken_encoder( chunk_size=800, chunk_overlap=400, ) documents = text_splitter.split_documents(raw_documents) embeddings = OpenAIEmbeddings() pinecone.init( api_key="YOUR_API_KEY", # find at app.pinecone.io environment="YOUR_ENV", # next to api key in console ) Pinecone.from_documents(documents, embeddings) if __name__ == "__main__": ingest_docs()
[ "langchain.text_splitter.NLTKTextSplitter.from_tiktoken_encoder", "langchain.document_loaders.ReadTheDocsLoader", "langchain.embeddings.OpenAIEmbeddings", "langchain.vectorstores.pinecone.Pinecone.from_documents" ]
[((402, 445), 'langchain.document_loaders.ReadTheDocsLoader', 'ReadTheDocsLoader', (['"""hasura.io/docs/latest/"""'], {}), "('hasura.io/docs/latest/')\n", (419, 445), False, 'from langchain.document_loaders import ReadTheDocsLoader\n'), ((500, 573), 'langchain.text_splitter.NLTKTextSplitter.from_tiktoken_encoder', 'NLTKTextSplitter.from_tiktoken_encoder', ([], {'chunk_size': '(800)', 'chunk_overlap': '(400)'}), '(chunk_size=800, chunk_overlap=400)\n', (538, 573), False, 'from langchain.text_splitter import NLTKTextSplitter\n'), ((675, 693), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (691, 693), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((698, 759), 'pinecone.init', 'pinecone.init', ([], {'api_key': '"""YOUR_API_KEY"""', 'environment': '"""YOUR_ENV"""'}), "(api_key='YOUR_API_KEY', environment='YOUR_ENV')\n", (711, 759), False, 'import pinecone\n'), ((844, 890), 'langchain.vectorstores.pinecone.Pinecone.from_documents', 'Pinecone.from_documents', (['documents', 'embeddings'], {}), '(documents, embeddings)\n', (867, 890), False, 'from langchain.vectorstores.pinecone import Pinecone\n')]
from typing import List, Optional, Any, Dict from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env from pydantic import Extra, root_validator from sam.gpt.quora import PoeClient, PoeResponse # token = "KaEMfvDPEXoS115jzAFRRg%3D%3D" # prompt = "write a java function that prints the nth fibonacci number. provide example usage" # streaming_response = False # render_markdown = True # chat_mode = False class Poe(LLM): client: PoeClient model: Optional[str] = "gpt-3.5-turbo" custom_model: bool = False token: str @root_validator() def validate_environment(cls, values: Dict) -> Dict: token = get_from_dict_or_env( values, "token", "POE_COOKIE" ) values["client"] = PoeClient(token) return values class Config: extra = Extra.forbid @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Cohere API.""" models = { 'sage': 'capybara', 'gpt-4': 'beaver', 'claude-v1.2': 'a2_2', 'claude-instant-v1.0': 'a2', 'gpt-3.5-turbo': 'chinchilla', } _model = models[self.model] if not self.custom_model else self.model return { "model": _model, "token": self.token, } @property def _identifying_params(self) -> Dict[str, Any]: return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: return "poe" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: params = self._default_params for chunk in self.client.send_message(params.model, prompt): pass response = PoeResponse( { 'id': chunk['messageId'], 'object': 'text_completion', 'created': chunk['creationTime'], 'model': params.model, 'choices': [ { 'text': chunk['text'], 'index': 0, 'logprobs': None, 'finish_reason': 'stop', } ], 'usage': { 'prompt_tokens': len(prompt), 'completion_tokens': len(chunk['text']), 'total_tokens': len(prompt) + len(chunk['text']), }, } ) text = response.completion.choices[0].text return text
[ "langchain.utils.get_from_dict_or_env" ]
[((573, 589), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (587, 589), False, 'from pydantic import Extra, root_validator\n'), ((663, 714), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""token"""', '"""POE_COOKIE"""'], {}), "(values, 'token', 'POE_COOKIE')\n", (683, 714), False, 'from langchain.utils import get_from_dict_or_env\n'), ((765, 781), 'sam.gpt.quora.PoeClient', 'PoeClient', (['token'], {}), '(token)\n', (774, 781), False, 'from sam.gpt.quora import PoeClient, PoeResponse\n')]
"""Auto-GPT style autonomous agent loop built on LangChain primitives.

NOTE(review): this module mirrors
``langchain.experimental.autonomous_agents.autogpt.agent`` — confirm
provenance before diverging from upstream behavior.
"""
from __future__ import annotations

from typing import List, Optional

from pydantic import ValidationError

from langchain.chains.llm import LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.experimental.autonomous_agents.autogpt.output_parser import (
    AutoGPTOutputParser,
    BaseAutoGPTOutputParser,
)
from langchain.experimental.autonomous_agents.autogpt.prompt import AutoGPTPrompt
from langchain.experimental.autonomous_agents.autogpt.prompt_generator import (
    FINISH_NAME,
)
from langchain.schema import (
    AIMessage,
    BaseMessage,
    Document,
    HumanMessage,
    SystemMessage,
)
from langchain.tools.base import BaseTool
from langchain.tools.human.tool import HumanInputRun
from langchain.vectorstores.base import VectorStoreRetriever


class AutoGPT:
    """Agent class for interacting with Auto-GPT."""

    def __init__(
        self,
        ai_name: str,
        memory: VectorStoreRetriever,
        chain: LLMChain,
        output_parser: BaseAutoGPTOutputParser,
        tools: List[BaseTool],
        feedback_tool: Optional[HumanInputRun] = None,
    ):
        """Store collaborators; see ``from_llm_and_tools`` for the usual wiring.

        Args:
            ai_name: Display name given to the assistant in prompts.
            memory: Vector-store retriever used as long-term memory.
            chain: LLM chain that produces each assistant reply.
            output_parser: Parses the reply into an action (name + args).
            tools: Tools the agent may invoke by name.
            feedback_tool: Optional human-in-the-loop tool; when set, the user
                is asked for feedback after every step and can abort the run.
        """
        self.ai_name = ai_name
        self.memory = memory
        # Running transcript of the whole session; grows every loop iteration.
        self.full_message_history: List[BaseMessage] = []
        # NOTE(review): incremented nowhere in this module — appears unused here.
        self.next_action_count = 0
        self.chain = chain
        self.output_parser = output_parser
        self.tools = tools
        self.feedback_tool = feedback_tool

    @classmethod
    def from_llm_and_tools(
        cls,
        ai_name: str,
        ai_role: str,
        memory: VectorStoreRetriever,
        tools: List[BaseTool],
        llm: BaseChatModel,
        human_in_the_loop: bool = False,
        output_parser: Optional[BaseAutoGPTOutputParser] = None,
    ) -> AutoGPT:
        """Build an :class:`AutoGPT` from an LLM and tools.

        Constructs the Auto-GPT prompt, wraps the LLM in an ``LLMChain``, and
        optionally attaches a human-feedback tool.
        """
        prompt = AutoGPTPrompt(
            ai_name=ai_name,
            ai_role=ai_role,
            tools=tools,
            input_variables=["memory", "messages", "goals", "user_input"],
            token_counter=llm.get_num_tokens,
        )
        human_feedback_tool = HumanInputRun() if human_in_the_loop else None
        chain = LLMChain(llm=llm, prompt=prompt)
        return cls(
            ai_name,
            memory,
            chain,
            # Fall back to the default parser when none is supplied.
            output_parser or AutoGPTOutputParser(),
            tools,
            feedback_tool=human_feedback_tool,
        )

    def run(self, goals: List[str]) -> str:
        """Run the interaction loop until the agent finishes or is aborted.

        Each iteration: query the LLM, parse its reply into an action, execute
        the matching tool, and feed the result back into history and memory.
        Returns the agent's final response (or ``"EXITING"`` when a human
        reviewer aborts the run).
        """
        user_input = (
            "Determine which next command to use, "
            "and respond using the format specified above:"
        )
        # Interaction Loop
        loop_count = 0
        while True:
            # Discontinue if continuous limit is reached
            # NOTE(review): loop_count is incremented but never checked, so no
            # limit is actually enforced — the loop only exits via FINISH_NAME
            # or human feedback. Confirm whether a cap was intended.
            loop_count += 1

            # Send message to AI, get response
            assistant_reply = self.chain.run(
                goals=goals,
                messages=self.full_message_history,
                memory=self.memory,
                user_input=user_input,
            )

            # Print Assistant thoughts
            print(assistant_reply)
            self.full_message_history.append(HumanMessage(content=user_input))
            self.full_message_history.append(AIMessage(content=assistant_reply))

            # Get command name and arguments
            action = self.output_parser.parse(assistant_reply)
            tools = {t.name: t for t in self.tools}
            if action.name == FINISH_NAME:
                # Agent signalled completion; its "response" arg is the answer.
                return action.args["response"]
            if action.name in tools:
                tool = tools[action.name]
                try:
                    observation = tool.run(action.args)
                except ValidationError as e:
                    # Bad tool arguments: report back instead of crashing the loop.
                    observation = (
                        f"Validation Error in args: {str(e)}, args: {action.args}"
                    )
                except Exception as e:
                    # Any other tool failure is also surfaced to the agent as text.
                    observation = (
                        f"Error: {str(e)}, {type(e).__name__}, args: {action.args}"
                    )
                result = f"Command {tool.name} returned: {observation}"
            elif action.name == "ERROR":
                result = f"Error: {action.args}. "
            else:
                result = (
                    f"Unknown command '{action.name}'. "
                    f"Please refer to the 'COMMANDS' list for available "
                    f"commands and only respond in the specified JSON format."
                )

            memory_to_add = (
                f"Assistant Reply: {assistant_reply} " f"\nResult: {result} "
            )
            if self.feedback_tool is not None:
                # Human-in-the-loop checkpoint: "q"/"stop" aborts the whole run.
                feedback = f"\n{self.feedback_tool.run('Input: ')}"
                if feedback in {"q", "stop"}:
                    print("EXITING")
                    return "EXITING"
                memory_to_add += feedback

            # Persist this step to long-term memory and the visible history.
            self.memory.add_documents([Document(page_content=memory_to_add)])
            self.full_message_history.append(SystemMessage(content=result))
[ "langchain.chains.llm.LLMChain", "langchain.tools.human.tool.HumanInputRun", "langchain.schema.AIMessage", "langchain.schema.HumanMessage", "langchain.experimental.autonomous_agents.autogpt.prompt.AutoGPTPrompt", "langchain.schema.Document", "langchain.experimental.autonomous_agents.autogpt.output_parser.AutoGPTOutputParser", "langchain.schema.SystemMessage" ]
[((1753, 1918), 'langchain.experimental.autonomous_agents.autogpt.prompt.AutoGPTPrompt', 'AutoGPTPrompt', ([], {'ai_name': 'ai_name', 'ai_role': 'ai_role', 'tools': 'tools', 'input_variables': "['memory', 'messages', 'goals', 'user_input']", 'token_counter': 'llm.get_num_tokens'}), "(ai_name=ai_name, ai_role=ai_role, tools=tools,\n input_variables=['memory', 'messages', 'goals', 'user_input'],\n token_counter=llm.get_num_tokens)\n", (1766, 1918), False, 'from langchain.experimental.autonomous_agents.autogpt.prompt import AutoGPTPrompt\n'), ((2075, 2107), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (2083, 2107), False, 'from langchain.chains.llm import LLMChain\n'), ((2012, 2027), 'langchain.tools.human.tool.HumanInputRun', 'HumanInputRun', ([], {}), '()\n', (2025, 2027), False, 'from langchain.tools.human.tool import HumanInputRun\n'), ((2217, 2238), 'langchain.experimental.autonomous_agents.autogpt.output_parser.AutoGPTOutputParser', 'AutoGPTOutputParser', ([], {}), '()\n', (2236, 2238), False, 'from langchain.experimental.autonomous_agents.autogpt.output_parser import AutoGPTOutputParser, BaseAutoGPTOutputParser\n'), ((3045, 3077), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'user_input'}), '(content=user_input)\n', (3057, 3077), False, 'from langchain.schema import AIMessage, BaseMessage, Document, HumanMessage, SystemMessage\n'), ((3124, 3158), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'assistant_reply'}), '(content=assistant_reply)\n', (3133, 3158), False, 'from langchain.schema import AIMessage, BaseMessage, Document, HumanMessage, SystemMessage\n'), ((4895, 4924), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'result'}), '(content=result)\n', (4908, 4924), False, 'from langchain.schema import AIMessage, BaseMessage, Document, HumanMessage, SystemMessage\n'), ((4811, 4847), 'langchain.schema.Document', 'Document', ([], 
{'page_content': 'memory_to_add'}), '(page_content=memory_to_add)\n', (4819, 4847), False, 'from langchain.schema import AIMessage, BaseMessage, Document, HumanMessage, SystemMessage\n')]
"""Main entrypoint for the app.""" import asyncio import os from operator import itemgetter from typing import List, Optional, Sequence, Tuple, Union from uuid import UUID from fastapi import Depends, FastAPI, Request from fastapi.middleware.cors import CORSMiddleware from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.chat_models import ChatAnthropic, ChatOpenAI, ChatVertexAI from langchain.document_loaders import AsyncHtmlLoader from langchain.document_transformers import Html2TextTransformer from langchain.embeddings import OpenAIEmbeddings from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, PromptTemplate from langchain.retrievers import ( ContextualCompressionRetriever, TavilySearchAPIRetriever, ) from langchain.retrievers.document_compressors import ( DocumentCompressorPipeline, EmbeddingsFilter, ) from langchain.retrievers.kay import KayAiRetriever from langchain.retrievers.you import YouRetriever from langchain.schema import Document from langchain.schema.document import Document from langchain.schema.language_model import BaseLanguageModel from langchain.schema.messages import AIMessage, HumanMessage from langchain.schema.output_parser import StrOutputParser from langchain.schema.retriever import BaseRetriever from langchain.schema.runnable import ( ConfigurableField, Runnable, RunnableBranch, RunnableLambda, RunnableMap, ) from langchain.text_splitter import RecursiveCharacterTextSplitter # Backup from langchain.utilities import GoogleSearchAPIWrapper from langserve import add_routes from pydantic import BaseModel, Field EN_PROMPT = False if EN_PROMPT: RESPONSE_TEMPLATE = """\ You are an expert researcher and writer, tasked with answering any question. Generate a comprehensive and informative, yet concise answer of 250 words or less for the \ given question based solely on the provided search results (URL and content). You must \ only use information from the provided search results. 
Use an unbiased and \ journalistic tone. Combine search results together into a coherent answer. Do not \ repeat text. Cite search results using [${{number}}] notation. Only cite the most \ relevant results that answer the question accurately. Place these citations at the end \ of the sentence or paragraph that reference them - do not put them all at the end. If \ different results refer to different entities within the same name, write separate \ answers for each entity. If you want to cite multiple results for the same sentence, \ format it as `[${{number1}}] [${{number2}}]`. However, you should NEVER do this with the \ same number - if you want to cite `number1` multiple times for a sentence, only do \ `[${{number1}}]` not `[${{number1}}] [${{number1}}]` You should use bullet points in your answer for readability. Put citations where they apply \ rather than putting them all at the end. If there is nothing in the context relevant to the question at hand, just say "Hmm, \ I'm not sure." Don't try to make up an answer. Anything between the following `context` html blocks is retrieved from a knowledge \ bank, not part of the conversation with the user. <context> {context} <context/> REMEMBER: If there is no relevant information within the context, just say "Hmm, I'm \ not sure." Don't try to make up an answer. Anything between the preceding 'context' \ html blocks is retrieved from a knowledge bank, not part of the conversation with the \ user.\ """ REPHRASE_TEMPLATE = """\ Given the following conversation and a follow up question, rephrase the follow up \ question to be a standalone question. 
Chat History: {chat_history} Follow Up Input: {question} Standalone Question:""" else: RESPONSE_TEMPLATE = """\ 您是一位专业的研究员和作家,负责回答任何问题。 基于提供的搜索结果(URL 和内容),为给定的问题生成一个全面而且信息丰富、但简洁的答案,长度不超过 250 字。您必须只使用来自提供的搜索结果的信息。使用公正和新闻性的语气。将搜索结果合并成一个连贯的答案。不要重复文本。一定要使用 [${{number}}] 标记引用搜索结果,其中 number 代表搜索到的文档的 id 号,用 <doc id=\'x\'> 表示。只引用最相关的结果,以准确回答问题。将这些引用放在提到它们的句子或段落的末尾 - 不要全部放在末尾。如果不同的结果涉及同名实体的不同部分,请为每个实体编写单独的答案。如果要在同一句子中引用多个结果,请将其格式化为 [${{number1}}] [${{number2}}]。然而,您绝对不应该对相同的数字进行这样的操作 - 如果要在一句话中多次引用 number1,只需使用 [${{number1}}],而不是 [${{number1}}] [${{number1}}]。 为了使您的答案更易读,您应该在答案中使用项目符号。在适用的地方放置引用,而不是全部放在末尾。 如果上下文中没有与当前问题相关的信息,只需说“嗯,我不确定。”不要试图编造答案。 位于以下context HTML 块之间的任何内容都是从知识库中检索到的,而不是与用户的对话的一部分。 <context> {context} <context/> 请记住:一定要在回答的时候带上检索的内容来源标号。如果上下文中没有与问题相关的信息,只需说“嗯,我不确定。”不要试图编造答案。位于上述 'context' HTML 块之前的任何内容都是从知识库中检索到的,而不是与用户的对话的一部分。再次记住一定要在回答的时候带上检索的内容来源标号,比如回答的某句话的信息来源于第 <doc id=\'x\'> 的搜索结果,就在该句话的末尾使用 [${{x}}] 来进行标记。 """ REPHRASE_TEMPLATE = """\ 考虑到以下对话和一个后续问题,请将后续问题重新表达为独立的问题。 聊天记录: {chat_history} 后续输入:{question} 独立问题:""" app = FastAPI() app.add_middleware( CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"], expose_headers=["*"], ) class ChatRequest(BaseModel): question: str chat_history: List[Tuple[str, str]] = Field( ..., extra={"widget": {"type": "chat", "input": "question", "output": "answer"}}, ) class GoogleCustomSearchRetriever(BaseRetriever): search: Optional[GoogleSearchAPIWrapper] = None num_search_results = 3 def clean_search_query(self, query: str) -> str: # Some search tools (e.g., Google) will # fail to return results if query has a # leading digit: 1. "LangCh..." 
# Check if the first character is a digit if query[0].isdigit(): # Find the position of the first quote first_quote_pos = query.find('"') if first_quote_pos != -1: # Extract the part of the string after the quote query = query[first_quote_pos + 1 :] # Remove the trailing quote if present if query.endswith('"'): query = query[:-1] return query.strip() def search_tool(self, query: str, num_search_results: int = 1) -> List[dict]: """Returns num_search_results pages per Google search.""" query_clean = self.clean_search_query(query) result = self.search.results(query_clean, num_search_results) return result def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun ): if os.environ.get("GOOGLE_API_KEY", None) == None: raise Exception("No Google API key provided") if self.search == None: self.search = GoogleSearchAPIWrapper() # Get search questions print("Generating questions for Google Search ...") # Get urls print("Searching for relevant urls...") urls_to_look = [] search_results = self.search_tool(query, self.num_search_results) print("Searching for relevant urls...") print(f"Search results: {search_results}") for res in search_results: if res.get("link", None): urls_to_look.append(res["link"]) print(search_results) loader = AsyncHtmlLoader(urls_to_look) html2text = Html2TextTransformer() print("Indexing new urls...") docs = loader.load() docs = list(html2text.transform_documents(docs)) for i in range(len(docs)): if search_results[i].get("title", None): docs[i].metadata["title"] = search_results[i]["title"] return docs def get_retriever(): embeddings = OpenAIEmbeddings() splitter = RecursiveCharacterTextSplitter(chunk_size=800, chunk_overlap=20) relevance_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.8) pipeline_compressor = DocumentCompressorPipeline( transformers=[splitter, relevance_filter] ) base_tavily_retriever = TavilySearchAPIRetriever( k=3, include_raw_content=True, include_images=True, ) 
tavily_retriever = ContextualCompressionRetriever( base_compressor=pipeline_compressor, base_retriever=base_tavily_retriever ) base_google_retriever = GoogleCustomSearchRetriever() google_retriever = ContextualCompressionRetriever( base_compressor=pipeline_compressor, base_retriever=base_google_retriever ) base_you_retriever = YouRetriever( ydc_api_key=os.environ.get("YDC_API_KEY", "not_provided") ) you_retriever = ContextualCompressionRetriever( base_compressor=pipeline_compressor, base_retriever=base_you_retriever ) base_kay_retriever = KayAiRetriever.create( dataset_id="company", data_types=["10-K", "10-Q"], num_contexts=6, ) kay_retriever = ContextualCompressionRetriever( base_compressor=pipeline_compressor, base_retriever=base_kay_retriever ) base_kay_press_release_retriever = KayAiRetriever.create( dataset_id="company", data_types=["PressRelease"], num_contexts=6, ) kay_press_release_retriever = ContextualCompressionRetriever( base_compressor=pipeline_compressor, base_retriever=base_kay_press_release_retriever, ) return tavily_retriever.configurable_alternatives( # This gives this field an id # When configuring the end runnable, we can then use this id to configure this field ConfigurableField(id="retriever"), default_key="tavily", google=google_retriever, you=you_retriever, kay=kay_retriever, kay_press_release=kay_press_release_retriever, ).with_config(run_name="FinalSourceRetriever") def create_retriever_chain( llm: BaseLanguageModel, retriever: BaseRetriever ) -> Runnable: CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(REPHRASE_TEMPLATE) condense_question_chain = ( CONDENSE_QUESTION_PROMPT | llm | StrOutputParser() ).with_config( run_name="CondenseQuestion", ) conversation_chain = condense_question_chain | retriever return RunnableBranch( ( RunnableLambda(lambda x: bool(x.get("chat_history"))).with_config( run_name="HasChatHistoryCheck" ), conversation_chain.with_config(run_name="RetrievalChainWithHistory"), ), ( 
RunnableLambda(itemgetter("question")).with_config( run_name="Itemgetter:question" ) | retriever ).with_config(run_name="RetrievalChainWithNoHistory"), ).with_config(run_name="RouteDependingOnChatHistory") def serialize_history(request: ChatRequest): chat_history = request.get("chat_history", []) converted_chat_history = [] for message in chat_history: if message[0] == "human": converted_chat_history.append(HumanMessage(content=message[1])) elif message[0] == "ai": converted_chat_history.append(AIMessage(content=message[1])) return converted_chat_history def format_docs(docs: Sequence[Document]) -> str: formatted_docs = [] for i, doc in enumerate(docs): doc_string = f"<doc id='{i}'>{doc.page_content}</doc>" formatted_docs.append(doc_string) return "\n".join(formatted_docs) def create_chain( llm: BaseLanguageModel, retriever: BaseRetriever, ) -> Runnable: retriever_chain = create_retriever_chain(llm, retriever) | RunnableLambda( format_docs ).with_config(run_name="FormatDocumentChunks") _context = RunnableMap( { "context": retriever_chain.with_config(run_name="RetrievalChain"), "question": RunnableLambda(itemgetter("question")).with_config( run_name="Itemgetter:question" ), "chat_history": RunnableLambda(itemgetter("chat_history")).with_config( run_name="Itemgetter:chat_history" ), } ) prompt = ChatPromptTemplate.from_messages( [ ("system", RESPONSE_TEMPLATE), MessagesPlaceholder(variable_name="chat_history"), ("human", "{question}"), ] ) response_synthesizer = (prompt | llm | StrOutputParser()).with_config( run_name="GenerateResponse", ) return ( { "question": RunnableLambda(itemgetter("question")).with_config( run_name="Itemgetter:question" ), "chat_history": RunnableLambda(serialize_history).with_config( run_name="SerializeHistory" ), } | _context | response_synthesizer ) dir_path = os.path.dirname(os.path.realpath(__file__)) os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = ( dir_path + "/" + ".google_vertex_ai_credentials.json" ) has_google_creds = 
os.path.isfile(os.environ["GOOGLE_APPLICATION_CREDENTIALS"]) openai_api_base = "http://127.0.0.1:8000/v1" llm = ChatOpenAI( model="gpt-3.5-turbo-16k", # model="gpt-4", streaming=True, temperature=0.1, ).configurable_alternatives( # This gives this field an id # When configuring the end runnable, we can then use this id to configure this field ConfigurableField(id="llm"), default_key="openai", chatglm=ChatOpenAI(model="chatglm3-6b", openai_api_base=openai_api_base) ) if has_google_creds: llm = ChatOpenAI( model="gpt-3.5-turbo-16k", # model="gpt-4", streaming=True, temperature=0.1, ).configurable_alternatives( # This gives this field an id # When configuring the end runnable, we can then use this id to configure this field ConfigurableField(id="llm"), default_key="openai", ) retriever = get_retriever() chain = create_chain(llm, retriever) add_routes( app, chain, path="/chat", input_type=ChatRequest, config_keys=["configurable"] ) if __name__ == "__main__": import uvicorn uvicorn.run(app, host="0.0.0.0", port=8080)
[ "langchain.schema.runnable.ConfigurableField", "langchain.schema.messages.HumanMessage", "langchain.prompts.PromptTemplate.from_template", "langchain.chat_models.ChatOpenAI", "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.document_loaders.AsyncHtmlLoader", "langchain.schema.output_parser.StrOutputParser", "langchain.schema.messages.AIMessage", "langchain.retrievers.kay.KayAiRetriever.create", "langchain.retrievers.document_compressors.EmbeddingsFilter", "langchain.utilities.GoogleSearchAPIWrapper", "langchain.embeddings.OpenAIEmbeddings", "langchain.prompts.MessagesPlaceholder", "langchain.retrievers.document_compressors.DocumentCompressorPipeline", "langchain.retrievers.TavilySearchAPIRetriever", "langchain.retrievers.ContextualCompressionRetriever", "langchain.schema.runnable.RunnableLambda", "langchain.document_transformers.Html2TextTransformer" ]
[((4923, 4932), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (4930, 4932), False, 'from fastapi import Depends, FastAPI, Request\n'), ((12927, 12987), 'os.path.isfile', 'os.path.isfile', (["os.environ['GOOGLE_APPLICATION_CREDENTIALS']"], {}), "(os.environ['GOOGLE_APPLICATION_CREDENTIALS'])\n", (12941, 12987), False, 'import os\n'), ((13894, 13989), 'langserve.add_routes', 'add_routes', (['app', 'chain'], {'path': '"""/chat"""', 'input_type': 'ChatRequest', 'config_keys': "['configurable']"}), "(app, chain, path='/chat', input_type=ChatRequest, config_keys=[\n 'configurable'])\n", (13904, 13989), False, 'from langserve import add_routes\n'), ((5196, 5287), 'pydantic.Field', 'Field', (['...'], {'extra': "{'widget': {'type': 'chat', 'input': 'question', 'output': 'answer'}}"}), "(..., extra={'widget': {'type': 'chat', 'input': 'question', 'output':\n 'answer'}})\n", (5201, 5287), False, 'from pydantic import BaseModel, Field\n'), ((7679, 7697), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (7695, 7697), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((7713, 7777), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(800)', 'chunk_overlap': '(20)'}), '(chunk_size=800, chunk_overlap=20)\n', (7743, 7777), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((7801, 7866), 'langchain.retrievers.document_compressors.EmbeddingsFilter', 'EmbeddingsFilter', ([], {'embeddings': 'embeddings', 'similarity_threshold': '(0.8)'}), '(embeddings=embeddings, similarity_threshold=0.8)\n', (7817, 7866), False, 'from langchain.retrievers.document_compressors import DocumentCompressorPipeline, EmbeddingsFilter\n'), ((7893, 7962), 'langchain.retrievers.document_compressors.DocumentCompressorPipeline', 'DocumentCompressorPipeline', ([], {'transformers': '[splitter, relevance_filter]'}), '(transformers=[splitter, relevance_filter])\n', (7919, 7962), 
False, 'from langchain.retrievers.document_compressors import DocumentCompressorPipeline, EmbeddingsFilter\n'), ((8005, 8081), 'langchain.retrievers.TavilySearchAPIRetriever', 'TavilySearchAPIRetriever', ([], {'k': '(3)', 'include_raw_content': '(True)', 'include_images': '(True)'}), '(k=3, include_raw_content=True, include_images=True)\n', (8029, 8081), False, 'from langchain.retrievers import ContextualCompressionRetriever, TavilySearchAPIRetriever\n'), ((8136, 8245), 'langchain.retrievers.ContextualCompressionRetriever', 'ContextualCompressionRetriever', ([], {'base_compressor': 'pipeline_compressor', 'base_retriever': 'base_tavily_retriever'}), '(base_compressor=pipeline_compressor,\n base_retriever=base_tavily_retriever)\n', (8166, 8245), False, 'from langchain.retrievers import ContextualCompressionRetriever, TavilySearchAPIRetriever\n'), ((8337, 8446), 'langchain.retrievers.ContextualCompressionRetriever', 'ContextualCompressionRetriever', ([], {'base_compressor': 'pipeline_compressor', 'base_retriever': 'base_google_retriever'}), '(base_compressor=pipeline_compressor,\n base_retriever=base_google_retriever)\n', (8367, 8446), False, 'from langchain.retrievers import ContextualCompressionRetriever, TavilySearchAPIRetriever\n'), ((8588, 8694), 'langchain.retrievers.ContextualCompressionRetriever', 'ContextualCompressionRetriever', ([], {'base_compressor': 'pipeline_compressor', 'base_retriever': 'base_you_retriever'}), '(base_compressor=pipeline_compressor,\n base_retriever=base_you_retriever)\n', (8618, 8694), False, 'from langchain.retrievers import ContextualCompressionRetriever, TavilySearchAPIRetriever\n'), ((8730, 8822), 'langchain.retrievers.kay.KayAiRetriever.create', 'KayAiRetriever.create', ([], {'dataset_id': '"""company"""', 'data_types': "['10-K', '10-Q']", 'num_contexts': '(6)'}), "(dataset_id='company', data_types=['10-K', '10-Q'],\n num_contexts=6)\n", (8751, 8822), False, 'from langchain.retrievers.kay import KayAiRetriever\n'), ((8870, 8976), 
'langchain.retrievers.ContextualCompressionRetriever', 'ContextualCompressionRetriever', ([], {'base_compressor': 'pipeline_compressor', 'base_retriever': 'base_kay_retriever'}), '(base_compressor=pipeline_compressor,\n base_retriever=base_kay_retriever)\n', (8900, 8976), False, 'from langchain.retrievers import ContextualCompressionRetriever, TavilySearchAPIRetriever\n'), ((9026, 9118), 'langchain.retrievers.kay.KayAiRetriever.create', 'KayAiRetriever.create', ([], {'dataset_id': '"""company"""', 'data_types': "['PressRelease']", 'num_contexts': '(6)'}), "(dataset_id='company', data_types=['PressRelease'],\n num_contexts=6)\n", (9047, 9118), False, 'from langchain.retrievers.kay import KayAiRetriever\n'), ((9180, 9300), 'langchain.retrievers.ContextualCompressionRetriever', 'ContextualCompressionRetriever', ([], {'base_compressor': 'pipeline_compressor', 'base_retriever': 'base_kay_press_release_retriever'}), '(base_compressor=pipeline_compressor,\n base_retriever=base_kay_press_release_retriever)\n', (9210, 9300), False, 'from langchain.retrievers import ContextualCompressionRetriever, TavilySearchAPIRetriever\n'), ((9901, 9948), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['REPHRASE_TEMPLATE'], {}), '(REPHRASE_TEMPLATE)\n', (9929, 9948), False, 'from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, PromptTemplate\n'), ((12769, 12795), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (12785, 12795), False, 'import os\n'), ((13302, 13329), 'langchain.schema.runnable.ConfigurableField', 'ConfigurableField', ([], {'id': '"""llm"""'}), "(id='llm')\n", (13319, 13329), False, 'from langchain.schema.runnable import ConfigurableField, Runnable, RunnableBranch, RunnableLambda, RunnableMap\n'), ((14044, 14087), 'uvicorn.run', 'uvicorn.run', (['app'], {'host': '"""0.0.0.0"""', 'port': '(8080)'}), "(app, host='0.0.0.0', port=8080)\n", (14055, 14087), False, 'import uvicorn\n'), ((7263, 7292), 
'langchain.document_loaders.AsyncHtmlLoader', 'AsyncHtmlLoader', (['urls_to_look'], {}), '(urls_to_look)\n', (7278, 7292), False, 'from langchain.document_loaders import AsyncHtmlLoader\n'), ((7313, 7335), 'langchain.document_transformers.Html2TextTransformer', 'Html2TextTransformer', ([], {}), '()\n', (7333, 7335), False, 'from langchain.document_transformers import Html2TextTransformer\n'), ((13041, 13111), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-3.5-turbo-16k"""', 'streaming': '(True)', 'temperature': '(0.1)'}), "(model='gpt-3.5-turbo-16k', streaming=True, temperature=0.1)\n", (13051, 13111), False, 'from langchain.chat_models import ChatAnthropic, ChatOpenAI, ChatVertexAI\n'), ((13369, 13433), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""chatglm3-6b"""', 'openai_api_base': 'openai_api_base'}), "(model='chatglm3-6b', openai_api_base=openai_api_base)\n", (13379, 13433), False, 'from langchain.chat_models import ChatAnthropic, ChatOpenAI, ChatVertexAI\n'), ((13761, 13788), 'langchain.schema.runnable.ConfigurableField', 'ConfigurableField', ([], {'id': '"""llm"""'}), "(id='llm')\n", (13778, 13788), False, 'from langchain.schema.runnable import ConfigurableField, Runnable, RunnableBranch, RunnableLambda, RunnableMap\n'), ((6544, 6582), 'os.environ.get', 'os.environ.get', (['"""GOOGLE_API_KEY"""', 'None'], {}), "('GOOGLE_API_KEY', None)\n", (6558, 6582), False, 'import os\n'), ((6709, 6733), 'langchain.utilities.GoogleSearchAPIWrapper', 'GoogleSearchAPIWrapper', ([], {}), '()\n', (6731, 6733), False, 'from langchain.utilities import GoogleSearchAPIWrapper\n'), ((8516, 8561), 'os.environ.get', 'os.environ.get', (['"""YDC_API_KEY"""', '"""not_provided"""'], {}), "('YDC_API_KEY', 'not_provided')\n", (8530, 8561), False, 'import os\n'), ((12156, 12205), 'langchain.prompts.MessagesPlaceholder', 'MessagesPlaceholder', ([], {'variable_name': '"""chat_history"""'}), "(variable_name='chat_history')\n", (12175, 12205), 
False, 'from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, PromptTemplate\n'), ((13468, 13538), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-3.5-turbo-16k"""', 'streaming': '(True)', 'temperature': '(0.1)'}), "(model='gpt-3.5-turbo-16k', streaming=True, temperature=0.1)\n", (13478, 13538), False, 'from langchain.chat_models import ChatAnthropic, ChatOpenAI, ChatVertexAI\n'), ((9514, 9547), 'langchain.schema.runnable.ConfigurableField', 'ConfigurableField', ([], {'id': '"""retriever"""'}), "(id='retriever')\n", (9531, 9547), False, 'from langchain.schema.runnable import ConfigurableField, Runnable, RunnableBranch, RunnableLambda, RunnableMap\n'), ((10022, 10039), 'langchain.schema.output_parser.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (10037, 10039), False, 'from langchain.schema.output_parser import StrOutputParser\n'), ((10953, 10985), 'langchain.schema.messages.HumanMessage', 'HumanMessage', ([], {'content': 'message[1]'}), '(content=message[1])\n', (10965, 10985), False, 'from langchain.schema.messages import AIMessage, HumanMessage\n'), ((11536, 11563), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (['format_docs'], {}), '(format_docs)\n', (11550, 11563), False, 'from langchain.schema.runnable import ConfigurableField, Runnable, RunnableBranch, RunnableLambda, RunnableMap\n'), ((12304, 12321), 'langchain.schema.output_parser.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (12319, 12321), False, 'from langchain.schema.output_parser import StrOutputParser\n'), ((11062, 11091), 'langchain.schema.messages.AIMessage', 'AIMessage', ([], {'content': 'message[1]'}), '(content=message[1])\n', (11071, 11091), False, 'from langchain.schema.messages import AIMessage, HumanMessage\n'), ((11779, 11801), 'operator.itemgetter', 'itemgetter', (['"""question"""'], {}), "('question')\n", (11789, 11801), False, 'from operator import itemgetter\n'), ((11921, 11947), 'operator.itemgetter', 
'itemgetter', (['"""chat_history"""'], {}), "('chat_history')\n", (11931, 11947), False, 'from operator import itemgetter\n'), ((12568, 12601), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (['serialize_history'], {}), '(serialize_history)\n', (12582, 12601), False, 'from langchain.schema.runnable import ConfigurableField, Runnable, RunnableBranch, RunnableLambda, RunnableMap\n'), ((12441, 12463), 'operator.itemgetter', 'itemgetter', (['"""question"""'], {}), "('question')\n", (12451, 12463), False, 'from operator import itemgetter\n'), ((10471, 10493), 'operator.itemgetter', 'itemgetter', (['"""question"""'], {}), "('question')\n", (10481, 10493), False, 'from operator import itemgetter\n')]
from langchain.chat_models import ChatOpenAI from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner from langchain.llms import OpenAI from langchain import SerpAPIWrapper from langchain.agents.tools import Tool from langchain import LLMMathChain search = SerpAPIWrapper() llm = OpenAI(temperature=0) llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True) tools = [ Tool( name = "Search", func=search.run, description="useful for when you need to answer questions about current events" ), Tool( name="Calculator", func=llm_math_chain.run, description="useful for when you need to answer questions about math" ), ] model = ChatOpenAI(temperature=0) planner = load_chat_planner(model) executor = load_agent_executor(model, tools, verbose=True) agent = PlanAndExecute(planner=planner, executor=executor, verbose=True) agent.run("中国前2的特种磨具陶瓷企业,生产1000份磨具陶瓷需要多少钱?")
[ "langchain_experimental.plan_and_execute.load_chat_planner", "langchain.chat_models.ChatOpenAI", "langchain.LLMMathChain.from_llm", "langchain.llms.OpenAI", "langchain.agents.tools.Tool", "langchain_experimental.plan_and_execute.load_agent_executor", "langchain.SerpAPIWrapper", "langchain_experimental.plan_and_execute.PlanAndExecute" ]
[((308, 324), 'langchain.SerpAPIWrapper', 'SerpAPIWrapper', ([], {}), '()\n', (322, 324), False, 'from langchain import SerpAPIWrapper\n'), ((331, 352), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (337, 352), False, 'from langchain.llms import OpenAI\n'), ((370, 414), 'langchain.LLMMathChain.from_llm', 'LLMMathChain.from_llm', ([], {'llm': 'llm', 'verbose': '(True)'}), '(llm=llm, verbose=True)\n', (391, 414), False, 'from langchain import LLMMathChain\n'), ((745, 770), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (755, 770), False, 'from langchain.chat_models import ChatOpenAI\n'), ((781, 805), 'langchain_experimental.plan_and_execute.load_chat_planner', 'load_chat_planner', (['model'], {}), '(model)\n', (798, 805), False, 'from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner\n'), ((817, 864), 'langchain_experimental.plan_and_execute.load_agent_executor', 'load_agent_executor', (['model', 'tools'], {'verbose': '(True)'}), '(model, tools, verbose=True)\n', (836, 864), False, 'from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner\n'), ((873, 937), 'langchain_experimental.plan_and_execute.PlanAndExecute', 'PlanAndExecute', ([], {'planner': 'planner', 'executor': 'executor', 'verbose': '(True)'}), '(planner=planner, executor=executor, verbose=True)\n', (887, 937), False, 'from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner\n'), ((429, 551), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Search"""', 'func': 'search.run', 'description': '"""useful for when you need to answer questions about current events"""'}), "(name='Search', func=search.run, description=\n 'useful for when you need to answer questions about current events')\n", (433, 551), False, 'from langchain.agents.tools import Tool\n'), ((584, 
708), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Calculator"""', 'func': 'llm_math_chain.run', 'description': '"""useful for when you need to answer questions about math"""'}), "(name='Calculator', func=llm_math_chain.run, description=\n 'useful for when you need to answer questions about math')\n", (588, 708), False, 'from langchain.agents.tools import Tool\n')]
"""Map-reduce chain. Splits up a document, sends the smaller parts to the LLM with one prompt, then combines the results with another one. """ from __future__ import annotations from typing import Any, Dict, List, Mapping, Optional from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks from langchain.chains import ReduceDocumentsChain from langchain.chains.base import Chain from langchain.chains.combine_documents.base import BaseCombineDocumentsChain from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.llm import LLMChain from langchain.docstore.document import Document from langchain.pydantic_v1 import Extra from langchain.schema import BasePromptTemplate from langchain.schema.language_model import BaseLanguageModel from langchain.text_splitter import TextSplitter class MapReduceChain(Chain): """Map-reduce chain.""" combine_documents_chain: BaseCombineDocumentsChain """Chain to use to combine documents.""" text_splitter: TextSplitter """Text splitter to use.""" input_key: str = "input_text" #: :meta private: output_key: str = "output_text" #: :meta private: @classmethod def from_params( cls, llm: BaseLanguageModel, prompt: BasePromptTemplate, text_splitter: TextSplitter, callbacks: Callbacks = None, combine_chain_kwargs: Optional[Mapping[str, Any]] = None, reduce_chain_kwargs: Optional[Mapping[str, Any]] = None, **kwargs: Any, ) -> MapReduceChain: """Construct a map-reduce chain that uses the chain for map and reduce.""" llm_chain = LLMChain(llm=llm, prompt=prompt, callbacks=callbacks) stuff_chain = StuffDocumentsChain( llm_chain=llm_chain, callbacks=callbacks, **(reduce_chain_kwargs if reduce_chain_kwargs else {}), ) reduce_documents_chain = ReduceDocumentsChain( combine_documents_chain=stuff_chain ) combine_documents_chain = MapReduceDocumentsChain( llm_chain=llm_chain, reduce_documents_chain=reduce_documents_chain, 
callbacks=callbacks, **(combine_chain_kwargs if combine_chain_kwargs else {}), ) return cls( combine_documents_chain=combine_documents_chain, text_splitter=text_splitter, callbacks=callbacks, **kwargs, ) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return output key. :meta private: """ return [self.output_key] def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() # Split the larger text into smaller chunks. doc_text = inputs.pop(self.input_key) texts = self.text_splitter.split_text(doc_text) docs = [Document(page_content=text) for text in texts] _inputs: Dict[str, Any] = { **inputs, self.combine_documents_chain.input_key: docs, } outputs = self.combine_documents_chain.run( _inputs, callbacks=_run_manager.get_child() ) return {self.output_key: outputs}
[ "langchain.chains.llm.LLMChain", "langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain", "langchain.callbacks.manager.CallbackManagerForChainRun.get_noop_manager", "langchain.chains.ReduceDocumentsChain", "langchain.docstore.document.Document", "langchain.chains.combine_documents.stuff.StuffDocumentsChain" ]
[((1734, 1787), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt', 'callbacks': 'callbacks'}), '(llm=llm, prompt=prompt, callbacks=callbacks)\n', (1742, 1787), False, 'from langchain.chains.llm import LLMChain\n'), ((1810, 1930), 'langchain.chains.combine_documents.stuff.StuffDocumentsChain', 'StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'callbacks': 'callbacks'}), '(llm_chain=llm_chain, callbacks=callbacks, **\n reduce_chain_kwargs if reduce_chain_kwargs else {})\n', (1829, 1930), False, 'from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n'), ((2008, 2065), 'langchain.chains.ReduceDocumentsChain', 'ReduceDocumentsChain', ([], {'combine_documents_chain': 'stuff_chain'}), '(combine_documents_chain=stuff_chain)\n', (2028, 2065), False, 'from langchain.chains import ReduceDocumentsChain\n'), ((2122, 2299), 'langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain', 'MapReduceDocumentsChain', ([], {'llm_chain': 'llm_chain', 'reduce_documents_chain': 'reduce_documents_chain', 'callbacks': 'callbacks'}), '(llm_chain=llm_chain, reduce_documents_chain=\n reduce_documents_chain, callbacks=callbacks, **combine_chain_kwargs if\n combine_chain_kwargs else {})\n', (2145, 2299), False, 'from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain\n'), ((3177, 3222), 'langchain.callbacks.manager.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (3220, 3222), False, 'from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks\n'), ((3394, 3421), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text'}), '(page_content=text)\n', (3402, 3421), False, 'from langchain.docstore.document import Document\n')]
import os import re import argparse import json import boto3 from bs4 import BeautifulSoup from langchain.document_loaders import PDFMinerPDFasHTMLLoader from langchain.docstore.document import Document from langchain.text_splitter import RecursiveCharacterTextSplitter,CharacterTextSplitter import statistics smr_client = boto3.client("sagemaker-runtime") parameters = { "max_length": 2048, "temperature": 0.01, "num_beams": 1, # >1可能会报错,"probability tensor contains either `inf`, `nan` or element < 0"; 即使remove_invalid_values=True也不能解决 "do_sample": False, "top_p": 0.7, "logits_processor" : None, # "remove_invalid_values" : True } ''' 1. pip install pdfminer.six ''' class Elembbox(object): left = -1 top = -1 width = -1 height = -1 right = -1 bottom = -1 margin = 8 # for header text above table RAW_MAX_DIST = 120 COL_MAX_DIST = 400 # top 增加是往下 bottom > top # right 增加是往右 right > left def __init__(self, left, top, width, height): self.left = left self.top = top self.width = width self.height = height self.right = left + width self.bottom = top + height # def __str__(self): # return "left:{}, top:{}, right:{}, bottom:{}, width:{}, height:{}".format(self.left, self.top, self.right, self.bottom, self.width, self.height) def __str__(self): return """<span style="position:absolute; border: red 1px solid; left:{}px; top:{}px; width:{}px; height:{}px;"></span>""".format(self.left, self.top, self.width, self.height) def is_overlap(self, other): if other is None: return False def is_pt_in_bbox(x, y, bbox): return x >= bbox.left \ and x <= bbox.right \ and y >= bbox.top - bbox.margin \ and y <= bbox.bottom + bbox.margin lefttop_in = is_pt_in_bbox(other.left, other.top, self) leftbottom_in = is_pt_in_bbox(other.left, other.bottom, self) righttop_in = is_pt_in_bbox(other.right, other.top, self) rightbottom_in = is_pt_in_bbox(other.right, other.bottom, self) lefttop_in_2 = is_pt_in_bbox(self.left, self.top, other) leftbottom_in_2 = is_pt_in_bbox(self.left, self.bottom, other) 
righttop_in_2 = is_pt_in_bbox(self.right, self.top, other) rightbottom_in_2 = is_pt_in_bbox(self.right, self.bottom, other) return lefttop_in or leftbottom_in or righttop_in or rightbottom_in or lefttop_in_2 or leftbottom_in_2 or righttop_in_2 or rightbottom_in_2 def is_beside(self, other): # only horizontal direction return self.is_overlap(other) def link_horizontal_lines(self, other): assert(self.height == 0) # merge the horizontal line if self.top == other.top: if self.left < other.left and other.left-1 <= self.right and other.right > self.right: # self.left < other.left <=self.right < other.right return True, Elembbox(self.left, self.top, other.right - self.left, self.height) elif other.left < self.left and self.left-1 <= other.right and other.right < self.right: # other.left < self.left <=other.right < self.right return True, Elembbox(other.left, self.top, self.right - other.left, self.height) return False, None def merge_horizontal_lines(self, other): if self.left == other.left and self.right == other.right: if other.top > self.bottom + self.RAW_MAX_DIST: return False, None if self.bottom < other.top: return True, Elembbox(self.left, self.top, self.width, other.top - self.top) return False, None def link_vertical_lines(self, other): assert(self.width == 0) # merge the vertical line if self.left == self.left: if self.top < other.top and other.top-1 <= self.bottom and self.bottom < other.bottom: # self.top < other.top <=self.bottom < other.bottom return True, Elembbox(self.left, self.top, self.width, other.bottom - self.top) elif other.top < self.top and self.top-1 <= other.bottom and other.bottom < self.bottom: return True, Elembbox(self.left, other.top, self.width, self.bottom - other.top) return False, None def merge_vertical_lines(self, other): if self.top == other.top and self.bottom == other.bottom: if other.right > self.right + self.COL_MAX_DIST: return False, None if self.right < other.right: return True, Elembbox(self.left, self.top, other.right - 
self.left, self.height) return False, None def create_bbox_horizontal(origin_span): vertical_sorted_span = sorted(origin_span, key=lambda span_pos: (span_pos.top, span_pos.left)) span_count = len(vertical_sorted_span) cur_span = vertical_sorted_span[0] merge_stage1_spans = [] for idx in range(1, span_count): success, new_span = cur_span.link_horizontal_lines(vertical_sorted_span[idx]) if not success: merge_stage1_spans.append(cur_span) cur_span = vertical_sorted_span[idx] else: cur_span = new_span merge_stage1_spans.append(cur_span) vertical_sorted_merge_spans = sorted(merge_stage1_spans, key=lambda span_pos: span_pos.left) # for item in vertical_sorted_merge_spans: # print(item) # print('---------------') merge_stage2_spans = [] cur_span = vertical_sorted_merge_spans[0] span_count = len(vertical_sorted_merge_spans) for idx in range(1, span_count): success, new_span = cur_span.merge_horizontal_lines(vertical_sorted_merge_spans[idx]) if not success: merge_stage2_spans.append(cur_span) cur_span = vertical_sorted_merge_spans[idx] else: cur_span = new_span return [ item for item in merge_stage2_spans if item.height >0 ] def create_bbox_vertical(origin_span): horizontal_sorted_span = sorted(origin_span, key=lambda span_pos: (span_pos.left, span_pos.top)) span_count = len(horizontal_sorted_span) cur_span = horizontal_sorted_span[0] merge_stage1_spans = [] for idx in range(1, span_count): success, new_span = cur_span.link_vertical_lines(horizontal_sorted_span[idx]) if not success: merge_stage1_spans.append(cur_span) cur_span = horizontal_sorted_span[idx] else: cur_span = new_span merge_stage1_spans.append(cur_span) horizontal_sorted_merge_spans = sorted(merge_stage1_spans, key=lambda span_pos: span_pos.top) # for item in horizontal_sorted_merge_spans: # print(item) # print('---------------') merge_stage2_spans = [] cur_span = horizontal_sorted_merge_spans[0] span_count = len(horizontal_sorted_merge_spans) for idx in range(1, span_count): success, new_span = 
cur_span.merge_vertical_lines(horizontal_sorted_merge_spans[idx]) if not success: merge_stage2_spans.append(cur_span) cur_span = horizontal_sorted_merge_spans[idx] else: cur_span = new_span return [ item for item in merge_stage2_spans if item.width >0 ] def merge_bbox(bbox_a, bbox_b): top = min(bbox_a.top, bbox_b.top) left = min(bbox_a.left, bbox_b.left) right = max(bbox_a.right, bbox_b.right) bottom = max(bbox_a.bottom, bbox_b.bottom) width = right - left height = bottom - top return Elembbox(left, top, width, height) def merge_bbox_list(bbox_list_a, bbox_list_b): if bbox_list_a is None: return bbox_list_b if bbox_list_b is None: return bbox_list_a merge_bbox_ret = [] overlap_flag = [False] * len(bbox_list_b) for bbox_a in bbox_list_a: merge_box = bbox_a for idx, bbox_b in enumerate(bbox_list_b): if merge_box.is_overlap(bbox_b): overlap_flag[idx] = True merge_box = merge_bbox(merge_box, bbox_b) merge_bbox_ret.append(merge_box) for idx in range(len(bbox_list_b)): if overlap_flag[idx] == False: merge_bbox_ret.append(bbox_list_b[idx]) return merge_bbox_ret def find_all_table_bbox(pdf_path): loader = PDFMinerPDFasHTMLLoader(pdf_path) data = loader.load()[0] soup = BeautifulSoup(data.page_content,'html.parser') table_border = soup.find_all('span') h_span = [] v_span = [] font_size_list = [] for idx, c in enumerate(table_border): # print("----{}---".format(idx)) style_attribute = c.get('style') # 'position:absolute; border: gray 1px solid; left:0px; top:50px; width:612px; height:792px;' attr_list = [ p.split(':') for p in style_attribute.strip(";").split('; ')] span_pos = { k : int(v[:-2]) for k,v in attr_list if k in ['left', 'top', 'width', 'height', 'font-size']} keys = span_pos.keys() if 'font-size' in keys: font_size_list.append(span_pos['font-size']) if 'left' not in keys or 'top' not in keys or 'width' not in keys or 'height' not in keys: continue if span_pos['height'] == 0 and span_pos['width'] > 10: 
h_span.append(Elembbox(span_pos['left'],span_pos['top'],span_pos['width'],span_pos['height'])) if span_pos['width'] == 0 and span_pos['height'] > 10: v_span.append(Elembbox(span_pos['left'],span_pos['top'],span_pos['width'],span_pos['height'])) h_bbox_list = None if len(h_span) > 0: h_bbox_list = create_bbox_horizontal(h_span) # print("----h_span bbox----") # for item in h_bbox_list: # print(item) v_bbox_list = None if len(v_span) > 0: v_bbox_list = create_bbox_vertical(v_span) # print("----v_span bbox----") # for item in v_bbox_list: # print(item) merge_bboxs =merge_bbox_list(h_bbox_list, v_bbox_list) # print("----merged bbox----") # for item in merge_bboxs: # print(item) # update padding for Elembbox mode_font_size = statistics.mode(font_size_list) for i in range(len(merge_bboxs)): merge_bboxs[i].margin = mode_font_size return merge_bboxs def fontsize_mapping(heading_fonts_arr): heading_fonts_set = list(set(heading_fonts_arr)) heading_fonts_set.sort(reverse=True) idxs = range(len(heading_fonts_set)) font_idx_mapping = dict(zip(heading_fonts_set,idxs)) return font_idx_mapping import pdb def split_pdf_to_snippet(pdf_path): loader = PDFMinerPDFasHTMLLoader(pdf_path) data = loader.load()[0] soup = BeautifulSoup(data.page_content,'html.parser') content = soup.find_all('div') cur_fs = None cur_text = None snippets = [] # first collect all snippets that have the same font size table_elem_bboxs = find_all_table_bbox(pdf_path) print("table bbox count: {}".format(len(table_elem_bboxs))) skip_count = 0 def overlap_with_table(table_elem_bboxs, div_elem_bbox): for elem_bbox in table_elem_bboxs: if div_elem_bbox.is_overlap(elem_bbox): return True return False previous_div_bbox = None snippet_start = True snippet_follow = False snippet_state = snippet_start for c in content: div_style_attribute = c.get('style') attr_list = [ p.split(':') for p in div_style_attribute.strip(";").split('; ')] div_pos = { k : int(v[:-2]) for k,v in attr_list if k in ['left', 'top', 'width', 
'height']} keys = div_pos.keys() if 'left' not in keys or 'top' not in keys or 'width' not in keys or 'height' not in keys: continue div_elem_bbox = Elembbox(div_pos['left'],div_pos['top'],div_pos['width'],div_pos['height']) if overlap_with_table(table_elem_bboxs, div_elem_bbox): skip_count += 1 continue # if these two div is not beside each other if not div_elem_bbox.is_beside(previous_div_bbox) and cur_text and cur_fs: snippets.append((cur_text,cur_fs,snippet_state)) cur_fs = None cur_text = None snippet_state = snippet_start previous_div_bbox = div_elem_bbox sp_list = c.find_all('span') if not sp_list: continue for sp in sp_list: st = sp.get('style') if not st: continue fs = re.findall('font-size:(\d+)px',st) if not fs: continue fs = int(fs[0]) if not cur_fs and not cur_text: cur_fs = fs cur_text = sp.text snippet_state = snippet_start continue if fs == cur_fs: cur_text += sp.text else: snippets.append((cur_text, cur_fs, snippet_state)) snippet_state = snippet_start if fs > cur_fs else snippet_follow cur_fs = fs cur_text = sp.text snippets.append((cur_text,cur_fs, snippet_follow)) # merge snippet merged_snippets = [] temp_list = [] doc_title = '' max_font_size = max([ item[1] for item in snippets]) for snippet in snippets: if max_font_size == snippet[1]: doc_title = snippet[0] if len(temp_list) == 0 or snippet[2] == False: temp_list.append(snippet) else: content_list = [ item[0] for item in temp_list] font_size_list = [ item[1] for item in temp_list] content = "\n".join(content_list) font_size = max(font_size_list) temp_list.clear() temp_list.append(snippet) merged_snippets.append({"content":content, "font_size":font_size}) print("filter {} table text".format(skip_count)) return merged_snippets, doc_title def split_pdf(pdf_path): semantic_snippets, doc_title = split_pdf_to_snippet(pdf_path) text_splitter = RecursiveCharacterTextSplitter( chunk_size = 1024, chunk_overlap = 0, separators=["\n\n", "\n", ".", "。", ",",","," "], ) for item in semantic_snippets: 
content = item["content"] chunks = text_splitter.create_documents([ content ] ) for chunk in chunks: snippet_info = { "content" : chunk.page_content, "font_size" : item["font_size"], "doc_title" : doc_title } yield snippet_info def summarize(content, chunk_size = 512, llm_endpoint=""): summary = content if llm_endpoint and len(content) > chunk_size: # todo: call LLM to summarize prompt_template = """对下面反引号这段文档进行摘要,字数不超过{} ``` {} ``` 摘要: """ prompt = prompt_template.format(chunk_size, content[:1536]) response_model = smr_client.invoke_endpoint( EndpointName=llm_endpoint, Body=json.dumps( { "inputs": prompt, "parameters": parameters, "history" : [] } ), ContentType="application/json", ) json_ret = json.loads(response_model['Body'].read().decode('utf8')) summary = json_ret['outputs'] return summary def convert_snippetJson2markdown(snippet_info, max_level=3): mk_head = "" p_head = "" for item in snippet_info["heading"][0:max_level][::-1]: mk_head += "#" head = "{} {}".format(mk_head, item["heading"].replace('\n','')) p_head += "{}\n".format(head) p_content = "{}\n{}".format(p_head, snippet_info['content']) return p_content if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--input_file', type=str, default='./1.pdf', help='input file') parser.add_argument('--output_dir', type=str, default='./', help='output file') parser.add_argument('--sep', type=str, default='=====', help='separtor') parser.add_argument('--title_level', type=int, default=4, help='keep the tiltes of level') parser.add_argument('--chunk_size', type=int, default=128, help='chunk_size') parser.add_argument('--llm_endpoint', type=str, default="", help='llm_endpoint') args = parser.parse_args() pdf_path = args.input_file kg_dir = args.output_dir kg_name = os.path.basename(pdf_path).replace('.pdf','.json') separtor = args.sep max_title_level = args.title_level chunk_size = args.chunk_size llm_endpoint = args.llm_endpoint idx = 1 f_name = "{}/{}".format(kg_dir, kg_name) out_f = 
open(f_name, 'w') snippet_arr = [] for snippet_info in split_pdf(pdf_path): snippet_arr.append(snippet_info) all_info = json.dumps(snippet_arr, ensure_ascii=False) out_f.write(all_info) out_f.close() print("finish separation of {}".format(pdf_path))
[ "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.document_loaders.PDFMinerPDFasHTMLLoader" ]
[((324, 357), 'boto3.client', 'boto3.client', (['"""sagemaker-runtime"""'], {}), "('sagemaker-runtime')\n", (336, 357), False, 'import boto3\n'), ((8397, 8430), 'langchain.document_loaders.PDFMinerPDFasHTMLLoader', 'PDFMinerPDFasHTMLLoader', (['pdf_path'], {}), '(pdf_path)\n', (8420, 8430), False, 'from langchain.document_loaders import PDFMinerPDFasHTMLLoader\n'), ((8470, 8517), 'bs4.BeautifulSoup', 'BeautifulSoup', (['data.page_content', '"""html.parser"""'], {}), "(data.page_content, 'html.parser')\n", (8483, 8517), False, 'from bs4 import BeautifulSoup\n'), ((10234, 10265), 'statistics.mode', 'statistics.mode', (['font_size_list'], {}), '(font_size_list)\n', (10249, 10265), False, 'import statistics\n'), ((10698, 10731), 'langchain.document_loaders.PDFMinerPDFasHTMLLoader', 'PDFMinerPDFasHTMLLoader', (['pdf_path'], {}), '(pdf_path)\n', (10721, 10731), False, 'from langchain.document_loaders import PDFMinerPDFasHTMLLoader\n'), ((10772, 10819), 'bs4.BeautifulSoup', 'BeautifulSoup', (['data.page_content', '"""html.parser"""'], {}), "(data.page_content, 'html.parser')\n", (10785, 10819), False, 'from bs4 import BeautifulSoup\n'), ((14183, 14304), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1024)', 'chunk_overlap': '(0)', 'separators': "['\\n\\n', '\\n', '.', '。', ',', ',', ' ']"}), "(chunk_size=1024, chunk_overlap=0, separators\n =['\\n\\n', '\\n', '.', '。', ',', ',', ' '])\n", (14213, 14304), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter\n'), ((15947, 15972), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (15970, 15972), False, 'import argparse\n'), ((16983, 17026), 'json.dumps', 'json.dumps', (['snippet_arr'], {'ensure_ascii': '(False)'}), '(snippet_arr, ensure_ascii=False)\n', (16993, 17026), False, 'import json\n'), ((12605, 12641), 're.findall', 're.findall', (['"""font-size:(\\\\d+)px"""', 'st'], {}), 
"('font-size:(\\\\d+)px', st)\n", (12615, 12641), False, 'import re\n'), ((16589, 16615), 'os.path.basename', 'os.path.basename', (['pdf_path'], {}), '(pdf_path)\n', (16605, 16615), False, 'import os\n'), ((15170, 15241), 'json.dumps', 'json.dumps', (["{'inputs': prompt, 'parameters': parameters, 'history': []}"], {}), "({'inputs': prompt, 'parameters': parameters, 'history': []})\n", (15180, 15241), False, 'import json\n')]
from langchain.vectorstores import Chroma from langchain.embeddings import OpenAIEmbeddings from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.llms import OpenAI from langchain.chains import VectorDBQA from langchain.document_loaders import TextLoader from typing import List from langchain.schema import Document import os os.environ['OPENAI_API_KEY'] = "your-api-key" class Genie: def __init__(self, file_path: str): self.file_path = file_path self.loader = TextLoader(self.file_path) self.documents = self.loader.load() self.texts = self.text_split(self.documents) self.vectordb = self.embeddings(self.texts) self.genie = VectorDBQA.from_chain_type(llm=OpenAI(), chain_type="stuff", vectorstore=self.vectordb) @staticmethod def text_split(documents: TextLoader): text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0) texts = text_splitter.split_documents(documents) return texts @staticmethod def embeddings(texts: List[Document]): embeddings = OpenAIEmbeddings() vectordb = Chroma.from_documents(texts, embeddings) return vectordb def ask(self, query: str): return self.genie.run(query) if __name__ == "__main__": genie = Genie("example.txt") print(genie.ask("How is the wheater like?"))
[ "langchain.vectorstores.Chroma.from_documents", "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.llms.OpenAI", "langchain.embeddings.OpenAIEmbeddings", "langchain.document_loaders.TextLoader" ]
[((515, 541), 'langchain.document_loaders.TextLoader', 'TextLoader', (['self.file_path'], {}), '(self.file_path)\n', (525, 541), False, 'from langchain.document_loaders import TextLoader\n'), ((886, 950), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(0)'}), '(chunk_size=1000, chunk_overlap=0)\n', (916, 950), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1112, 1130), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1128, 1130), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1150, 1190), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', (['texts', 'embeddings'], {}), '(texts, embeddings)\n', (1171, 1190), False, 'from langchain.vectorstores import Chroma\n'), ((743, 751), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (749, 751), False, 'from langchain.llms import OpenAI\n')]
import dataclasses import json import numpy as np import os import requests import sys from typing import List from langchain.text_splitter import CharacterTextSplitter from langchain.embeddings.openai import OpenAIEmbeddings from langchain.vectorstores import Chroma from langchain.chat_models import ChatOpenAI from langchain import VectorDBQA from langchain.chains import RetrievalQA, RetrievalQAWithSourcesChain from langchain.schema import Document from langchain.prompts import PromptTemplate from newspaper import Article from newspaper.article import ArticleException import openai from tenacity import ( retry, stop_after_attempt, wait_random_exponential, ) from bs4 import BeautifulSoup # Required to parse HTML from urllib.parse import unquote # Required to unquote URLs import argparse prompt_template = """Use the following pieces of context to answer the question completely and precisely in up to 500 words. If you don't know the answer, just say "I don't know" and explain why the context is insufficient to answer the question. You need to support every statement in the answer with in-line citations to passages given in the the context. The citations should appear as numbers such as [1], [2] that refer to the Passage IDs of the given passages. A statement may need to be supported by multiple references and should then be cited as [1] [2]. (for example, "Paris is the capital of France [1] [2]." where "1" and "2" are the Passage IDs of the first and second passage). 
{context} Question: {question} Answer:""" PROMPT = PromptTemplate( template=prompt_template, input_variables=["context", "question"] ) sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))) from data_utils import example_utils parser = argparse.ArgumentParser() parser.add_argument("--input_file", help="input filepath", type=str) parser.add_argument("--output_file", help="output filepath", type=str) parser.add_argument("--topk", help="the value of k for the topk passages to use for QA", type=int, default=5) args = parser.parse_args() text_splitter = CharacterTextSplitter(separator=' ', chunk_size=1000, chunk_overlap=200) ##### # OPENAI ##### OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') if OPENAI_API_KEY is None: raise ValueError("Please set the OpenAI API key as environment variable 'OPENAI_API_KEY'.") openai.organization = "" openai.api_key = "" llm = ChatOpenAI(model_name='gpt-4', openai_api_key=OPENAI_API_KEY) ##### # Google Search ##### CSE_URL = "https://www.googleapis.com/customsearch/v1" CSE_URL = "https://cse.google.com/cse?cx=74509b47ac2e54393" gs_api_key = os.getenv('CUSTOM_SEARCH_API_KEY') pse_cx = os.getenv('CUSTOM_SEARCH_CX') if gs_api_key is None: raise ValueError("Please set the Custom search API key as environment variable 'CUSTOM_SEARCH_API_KEY'.") if pse_cx is None: raise ValueError("Please set the Programmable search engine ID as environment variable 'CUSTOM_SEARCH_CX'.") @retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6)) def chat_completion_with_backoff(**kwargs): return openai.ChatCompletion.create(**kwargs) def google_search( query: str, api_key: str = None, cx: str = None ): """Get top 10 webpages from Google search. 
Args: query: search query api_key: custom search engine api key cx: programmable search engine id Returns: top-10 search results in json format """ response = requests.get(f"https://www.google.com/search?q={query}") # Make the request soup = BeautifulSoup(response.text, "html.parser") # Parse the HTML links = soup.find_all("a") # Find all the links in the HTML urls = [] for l in [link for link in links if link["href"].startswith("/url?q=")]: # get the url url = l["href"] # remove the "/url?q=" part url = url.replace("/url?q=", "") # remove the part after the "&sa=..." url = unquote(url.split("&sa=")[0]) # special case for google scholar if url.startswith("https://scholar.google.com/scholar_url?url=http"): url = url.replace("https://scholar.google.com/scholar_url?url=", "").split("&")[0] elif 'google.com/' in url: # skip google links continue if url.endswith('.pdf'): # skip pdf links continue if '#' in url: # remove anchors (e.g. wikipedia.com/bob#history and wikipedia.com/bob#genetics are the same page) url = url.split('#')[0] # print the url urls.append(url) # Use numpy to dedupe the list of urls after removing anchors urls = list(np.unique(urls)) return urls # if api_key is None: # api_key = gs_api_key # if cx is None: # cx = pse_cx # res = requests.get( # url=CSE_URL, # params={ # "q": query, # "key": api_key, # "cx": cx, # }, # ) # if res.status_code != 200: # print(f"Google search error: {res.status_code}") # return [] # res = res.json() # if 'items' in res: # return res['items'] # else: # return [] def scrape_and_parse(url: str): """Scrape a webpage and parse it into a Document object""" a = Article(url) try: a.download() a.parse() except Exception as e: return None return { "url": url, "text": a.text, } def scrape_and_filter(urls: list): doc_list = [] for u in urls: print(f"Processing: {u}") doc = scrape_and_parse(u) if doc is None: continue elif "Access" in doc["text"] and "Denied" in doc["text"]: continue else: doc_list.append(doc) return doc_list def 
retrieval_gpt_generate(query: str, retrieved_documents: List[Document], topk: int): texts = text_splitter.split_documents(retrieved_documents) embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY, max_retries=1000) while True: try: docsearch = Chroma.from_documents(texts, embeddings) except Exception as e: continue break doc_retriever = docsearch.as_retriever(search_kwargs={"k": topk}) topk_relevant_passages = doc_retriever.get_relevant_documents(query) topk_relevant_passages_content = ["Passage ID " + str(i+1) + ": " + doc.page_content.replace("\n","") for i, doc in enumerate(topk_relevant_passages)] ret_passages = [{"text": p, "url": d.metadata["source"]} for p, d in zip(topk_relevant_passages_content, topk_relevant_passages)] cur_prompt = prompt_template.format(context="Context: \n" + "\n\n".join(topk_relevant_passages_content), question=query) resp = chat_completion_with_backoff( model="gpt-4", messages=[{"role": "user", "content": cur_prompt}], max_tokens=2048, ) answer = resp["choices"][0]["message"]["content"] # chain_type_kwargs = {"prompt": PROMPT} # qa = VectorDBQA.from_chain_type(llm=llm, chain_type="stuff", vectorstore=docsearch, return_source_documents=True) # qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=docsearch.as_retriever(search_kwargs={"k": topk}), return_source_documents=True, chain_type_kwargs=chain_type_kwargs) # result = qa({"query": query}) return answer, ret_passages if __name__ == '__main__': input_data = example_utils.read_examples(args.input_file) f = open(args.output_file, "a") for example in input_data: query_text = example.question # Google search first search_results = google_search(query_text) # search_urls = [r['link'] for r in search_results] if len(search_results) > 0: all_docs = scrape_and_filter(search_results) else: all_docs = [] example.answers["ret_read_gpt4"] = example_utils.Answer(answer_string="I don't know.", attribution=[]) json.dump(dataclasses.asdict(example), f) f.write("\n") 
continue all_docs = [Document(page_content=d['text'], metadata={'source': d['url']}) for d in all_docs] all_docs_content_lens = [len(doc.page_content.strip()) for doc in all_docs] if not all_docs or not sum(all_docs_content_lens): example.answers["ret_read_gpt4"] = example_utils.Answer(answer_string="I don't know.", attribution=[]) json.dump(dataclasses.asdict(example), f) f.write("\n") continue gpt_query_text = "I am an expert in the field of " + example.metadata.field + ". Please answer my question: " + query_text answer, attributions = retrieval_gpt_generate(gpt_query_text, all_docs, args.topk) example.answers["rr_gs_gpt4"] = example_utils.Answer(answer_string=answer, attribution=attributions) json.dump(dataclasses.asdict(example), f) f.write("\n")
[ "langchain.text_splitter.CharacterTextSplitter", "langchain.chat_models.ChatOpenAI", "langchain.vectorstores.Chroma.from_documents", "langchain.prompts.PromptTemplate", "langchain.schema.Document", "langchain.embeddings.openai.OpenAIEmbeddings" ]
[((1563, 1648), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'prompt_template', 'input_variables': "['context', 'question']"}), "(template=prompt_template, input_variables=['context',\n 'question'])\n", (1577, 1648), False, 'from langchain.prompts import PromptTemplate\n'), ((1786, 1811), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1809, 1811), False, 'import argparse\n'), ((2108, 2180), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '""" """', 'chunk_size': '(1000)', 'chunk_overlap': '(200)'}), "(separator=' ', chunk_size=1000, chunk_overlap=200)\n", (2129, 2180), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((2220, 2247), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (2229, 2247), False, 'import os\n'), ((2424, 2485), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-4"""', 'openai_api_key': 'OPENAI_API_KEY'}), "(model_name='gpt-4', openai_api_key=OPENAI_API_KEY)\n", (2434, 2485), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2643, 2677), 'os.getenv', 'os.getenv', (['"""CUSTOM_SEARCH_API_KEY"""'], {}), "('CUSTOM_SEARCH_API_KEY')\n", (2652, 2677), False, 'import os\n'), ((2687, 2716), 'os.getenv', 'os.getenv', (['"""CUSTOM_SEARCH_CX"""'], {}), "('CUSTOM_SEARCH_CX')\n", (2696, 2716), False, 'import os\n'), ((3119, 3157), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', ([], {}), '(**kwargs)\n', (3147, 3157), False, 'import openai\n'), ((3509, 3565), 'requests.get', 'requests.get', (['f"""https://www.google.com/search?q={query}"""'], {}), "(f'https://www.google.com/search?q={query}')\n", (3521, 3565), False, 'import requests\n'), ((3596, 3639), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""html.parser"""'], {}), "(response.text, 'html.parser')\n", (3609, 3639), False, 'from bs4 import BeautifulSoup\n'), ((5313, 5325), 
'newspaper.Article', 'Article', (['url'], {}), '(url)\n', (5320, 5325), False, 'from newspaper import Article\n'), ((6052, 6117), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'OPENAI_API_KEY', 'max_retries': '(1000)'}), '(openai_api_key=OPENAI_API_KEY, max_retries=1000)\n', (6068, 6117), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((7521, 7565), 'data_utils.example_utils.read_examples', 'example_utils.read_examples', (['args.input_file'], {}), '(args.input_file)\n', (7548, 7565), False, 'from data_utils import example_utils\n'), ((2996, 3034), 'tenacity.wait_random_exponential', 'wait_random_exponential', ([], {'min': '(1)', 'max': '(60)'}), '(min=1, max=60)\n', (3019, 3034), False, 'from tenacity import retry, stop_after_attempt, wait_random_exponential\n'), ((3041, 3062), 'tenacity.stop_after_attempt', 'stop_after_attempt', (['(6)'], {}), '(6)\n', (3059, 3062), False, 'from tenacity import retry, stop_after_attempt, wait_random_exponential\n'), ((4678, 4693), 'numpy.unique', 'np.unique', (['urls'], {}), '(urls)\n', (4687, 4693), True, 'import numpy as np\n'), ((9024, 9092), 'data_utils.example_utils.Answer', 'example_utils.Answer', ([], {'answer_string': 'answer', 'attribution': 'attributions'}), '(answer_string=answer, attribution=attributions)\n', (9044, 9092), False, 'from data_utils import example_utils\n'), ((1700, 1725), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1715, 1725), False, 'import os\n'), ((6171, 6211), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', (['texts', 'embeddings'], {}), '(texts, embeddings)\n', (6192, 6211), False, 'from langchain.vectorstores import Chroma\n'), ((7994, 8061), 'data_utils.example_utils.Answer', 'example_utils.Answer', ([], {'answer_string': '"""I don\'t know."""', 'attribution': '[]'}), '(answer_string="I don\'t know.", attribution=[])\n', (8014, 8061), False, 'from data_utils import 
example_utils\n'), ((8252, 8315), 'langchain.schema.Document', 'Document', ([], {'page_content': "d['text']", 'metadata': "{'source': d['url']}"}), "(page_content=d['text'], metadata={'source': d['url']})\n", (8260, 8315), False, 'from langchain.schema import Document\n'), ((8525, 8592), 'data_utils.example_utils.Answer', 'example_utils.Answer', ([], {'answer_string': '"""I don\'t know."""', 'attribution': '[]'}), '(answer_string="I don\'t know.", attribution=[])\n', (8545, 8592), False, 'from data_utils import example_utils\n'), ((9175, 9202), 'dataclasses.asdict', 'dataclasses.asdict', (['example'], {}), '(example)\n', (9193, 9202), False, 'import dataclasses\n'), ((8152, 8179), 'dataclasses.asdict', 'dataclasses.asdict', (['example'], {}), '(example)\n', (8170, 8179), False, 'import dataclasses\n'), ((8683, 8710), 'dataclasses.asdict', 'dataclasses.asdict', (['example'], {}), '(example)\n', (8701, 8710), False, 'import dataclasses\n')]
import logging from pathlib import Path from typing import List, Optional, Tuple from dotenv import load_dotenv load_dotenv() from queue import Empty, Queue from threading import Thread import gradio as gr from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler from langchain.chat_models import ChatOpenAI from langchain.prompts import HumanMessagePromptTemplate from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage from callback import QueueCallback MODELS_NAMES = ["gpt-3.5-turbo", "gpt-4"] DEFAULT_TEMPERATURE = 0.7 ChatHistory = List[str] logging.basicConfig( format="[%(asctime)s %(levelname)s]: %(message)s", level=logging.INFO ) # load up our system prompt default_system_prompt = Path("prompts/system.prompt").read_text() # for the human, we will just inject the text human_message_prompt_template = HumanMessagePromptTemplate.from_template("{text}") def on_message_button_click( chat: Optional[ChatOpenAI], message: str, chatbot_messages: ChatHistory, messages: List[BaseMessage], ) -> Tuple[ChatOpenAI, str, ChatHistory, List[BaseMessage]]: if chat is None: # in the queue we will store our streamed tokens queue = Queue() # let's create our default chat chat = ChatOpenAI( model_name=MODELS_NAMES[0], temperature=DEFAULT_TEMPERATURE, streaming=True, callbacks=([QueueCallback(queue)]), ) else: # hacky way to get the queue back queue = chat.callbacks[0].queue job_done = object() logging.info(f"Asking question to GPT, messages={messages}") # let's add the messages to our stuff messages.append(HumanMessage(content=message)) chatbot_messages.append((message, "")) # this is a little wrapper we need cuz we have to add the job_done def task(): chat(messages) queue.put(job_done) # now let's start a thread and run the generation inside it t = Thread(target=task) t.start() # this will hold the content as we generate content = "" # now, we read the next_token from queue and do what it has to be done while True: try: next_token = 
queue.get(True, timeout=1) if next_token is job_done: break content += next_token chatbot_messages[-1] = (message, content) yield chat, "", chatbot_messages, messages except Empty: continue # finally we can add our reply to messsages messages.append(AIMessage(content=content)) logging.debug(f"reply = {content}") logging.info(f"Done!") return chat, "", chatbot_messages, messages def system_prompt_handler(value: str) -> str: return value def on_clear_button_click(system_prompt: str) -> Tuple[str, List, List]: return "", [], [SystemMessage(content=system_prompt)] def on_apply_settings_button_click( system_prompt: str, model_name: str, temperature: float ): logging.info( f"Applying settings: model_name={model_name}, temperature={temperature}" ) chat = ChatOpenAI( model_name=model_name, temperature=temperature, streaming=True, callbacks=[QueueCallback(Queue())], ) # don't forget to nuke our queue chat.callbacks[0].queue.empty() return chat, *on_clear_button_click(system_prompt) # some css why not, "borrowed" from https://huggingface.co/spaces/ysharma/Gradio-demo-streaming/blob/main/app.py with gr.Blocks( css="""#col_container {width: 700px; margin-left: auto; margin-right: auto;} #chatbot {height: 400px; overflow: auto;}""" ) as demo: system_prompt = gr.State(default_system_prompt) # here we keep our state so multiple user can use the app at the same time! messages = gr.State([SystemMessage(content=default_system_prompt)]) # same thing for the chat, we want one chat per use so callbacks are unique I guess chat = gr.State(None) with gr.Column(elem_id="col_container"): gr.Markdown("# Welcome to GradioGPT! 🌟🚀") gr.Markdown( "An easy to use template. 
It comes with state and settings managment" ) with gr.Column(): system_prompt_area = gr.TextArea( default_system_prompt, lines=4, label="system prompt", interactive=True ) # we store the value into the state to avoid re rendering of the area system_prompt_area.input( system_prompt_handler, inputs=[system_prompt_area], outputs=[system_prompt], ) system_prompt_button = gr.Button("Set") chatbot = gr.Chatbot() with gr.Column(): message = gr.Textbox(label="chat input") message.submit( on_message_button_click, [chat, message, chatbot, messages], [chat, message, chatbot, messages], queue=True, ) message_button = gr.Button("Submit", variant="primary") message_button.click( on_message_button_click, [chat, message, chatbot, messages], [chat, message, chatbot, messages], ) with gr.Row(): with gr.Column(): clear_button = gr.Button("Clear") clear_button.click( on_clear_button_click, [system_prompt], [message, chatbot, messages], queue=False, ) with gr.Accordion("Settings", open=False): model_name = gr.Dropdown( choices=MODELS_NAMES, value=MODELS_NAMES[0], label="model" ) temperature = gr.Slider( minimum=0.0, maximum=1.0, value=0.7, step=0.1, label="temperature", interactive=True, ) apply_settings_button = gr.Button("Apply") apply_settings_button.click( on_apply_settings_button_click, [system_prompt, model_name, temperature], [chat, message, chatbot, messages], ) system_prompt_button.click( on_apply_settings_button_click, [system_prompt, model_name, temperature], [chat, message, chatbot, messages], ) demo.queue() demo.launch()
[ "langchain.schema.AIMessage", "langchain.prompts.HumanMessagePromptTemplate.from_template", "langchain.schema.HumanMessage", "langchain.schema.SystemMessage" ]
[((114, 127), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (125, 127), False, 'from dotenv import load_dotenv\n'), ((604, 698), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""[%(asctime)s %(levelname)s]: %(message)s"""', 'level': 'logging.INFO'}), "(format='[%(asctime)s %(levelname)s]: %(message)s',\n level=logging.INFO)\n", (623, 698), False, 'import logging\n'), ((873, 923), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{text}"""'], {}), "('{text}')\n", (913, 923), False, 'from langchain.prompts import HumanMessagePromptTemplate\n'), ((1596, 1656), 'logging.info', 'logging.info', (['f"""Asking question to GPT, messages={messages}"""'], {}), "(f'Asking question to GPT, messages={messages}')\n", (1608, 1656), False, 'import logging\n'), ((2004, 2023), 'threading.Thread', 'Thread', ([], {'target': 'task'}), '(target=task)\n', (2010, 2023), False, 'from threading import Thread\n'), ((2606, 2641), 'logging.debug', 'logging.debug', (['f"""reply = {content}"""'], {}), "(f'reply = {content}')\n", (2619, 2641), False, 'import logging\n'), ((2646, 2668), 'logging.info', 'logging.info', (['f"""Done!"""'], {}), "(f'Done!')\n", (2658, 2668), False, 'import logging\n'), ((3020, 3111), 'logging.info', 'logging.info', (['f"""Applying settings: model_name={model_name}, temperature={temperature}"""'], {}), "(\n f'Applying settings: model_name={model_name}, temperature={temperature}')\n", (3032, 3111), False, 'import logging\n'), ((3530, 3688), 'gradio.Blocks', 'gr.Blocks', ([], {'css': '"""#col_container {width: 700px; margin-left: auto; margin-right: auto;}\n #chatbot {height: 400px; overflow: auto;}"""'}), '(css=\n """#col_container {width: 700px; margin-left: auto; margin-right: auto;}\n #chatbot {height: 400px; overflow: auto;}"""\n )\n', (3539, 3688), True, 'import gradio as gr\n'), ((3714, 3745), 'gradio.State', 'gr.State', (['default_system_prompt'], {}), 
'(default_system_prompt)\n', (3722, 3745), True, 'import gradio as gr\n'), ((3997, 4011), 'gradio.State', 'gr.State', (['None'], {}), '(None)\n', (4005, 4011), True, 'import gradio as gr\n'), ((753, 782), 'pathlib.Path', 'Path', (['"""prompts/system.prompt"""'], {}), "('prompts/system.prompt')\n", (757, 782), False, 'from pathlib import Path\n'), ((1228, 1235), 'queue.Queue', 'Queue', ([], {}), '()\n', (1233, 1235), False, 'from queue import Empty, Queue\n'), ((1719, 1748), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'message'}), '(content=message)\n', (1731, 1748), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((2574, 2600), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'content'}), '(content=content)\n', (2583, 2600), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((4022, 4056), 'gradio.Column', 'gr.Column', ([], {'elem_id': '"""col_container"""'}), "(elem_id='col_container')\n", (4031, 4056), True, 'import gradio as gr\n'), ((4066, 4107), 'gradio.Markdown', 'gr.Markdown', (['"""# Welcome to GradioGPT! 🌟🚀"""'], {}), "('# Welcome to GradioGPT! 🌟🚀')\n", (4077, 4107), True, 'import gradio as gr\n'), ((4116, 4203), 'gradio.Markdown', 'gr.Markdown', (['"""An easy to use template. It comes with state and settings managment"""'], {}), "(\n 'An easy to use template. 
It comes with state and settings managment')\n", (4127, 4203), True, 'import gradio as gr\n'), ((4725, 4737), 'gradio.Chatbot', 'gr.Chatbot', ([], {}), '()\n', (4735, 4737), True, 'import gradio as gr\n'), ((2877, 2913), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'system_prompt'}), '(content=system_prompt)\n', (2890, 2913), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((3851, 3895), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'default_system_prompt'}), '(content=default_system_prompt)\n', (3864, 3895), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((4234, 4245), 'gradio.Column', 'gr.Column', ([], {}), '()\n', (4243, 4245), True, 'import gradio as gr\n'), ((4280, 4368), 'gradio.TextArea', 'gr.TextArea', (['default_system_prompt'], {'lines': '(4)', 'label': '"""system prompt"""', 'interactive': '(True)'}), "(default_system_prompt, lines=4, label='system prompt',\n interactive=True)\n", (4291, 4368), True, 'import gradio as gr\n'), ((4689, 4705), 'gradio.Button', 'gr.Button', (['"""Set"""'], {}), "('Set')\n", (4698, 4705), True, 'import gradio as gr\n'), ((4751, 4762), 'gradio.Column', 'gr.Column', ([], {}), '()\n', (4760, 4762), True, 'import gradio as gr\n'), ((4786, 4816), 'gradio.Textbox', 'gr.Textbox', ([], {'label': '"""chat input"""'}), "(label='chat input')\n", (4796, 4816), True, 'import gradio as gr\n'), ((5061, 5099), 'gradio.Button', 'gr.Button', (['"""Submit"""'], {'variant': '"""primary"""'}), "('Submit', variant='primary')\n", (5070, 5099), True, 'import gradio as gr\n'), ((5306, 5314), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (5312, 5314), True, 'import gradio as gr\n'), ((5333, 5344), 'gradio.Column', 'gr.Column', ([], {}), '()\n', (5342, 5344), True, 'import gradio as gr\n'), ((5377, 5395), 'gradio.Button', 'gr.Button', (['"""Clear"""'], {}), "('Clear')\n", (5386, 5395), True, 'import gradio as gr\n'), 
((5630, 5666), 'gradio.Accordion', 'gr.Accordion', (['"""Settings"""'], {'open': '(False)'}), "('Settings', open=False)\n", (5642, 5666), True, 'import gradio as gr\n'), ((5697, 5768), 'gradio.Dropdown', 'gr.Dropdown', ([], {'choices': 'MODELS_NAMES', 'value': 'MODELS_NAMES[0]', 'label': '"""model"""'}), "(choices=MODELS_NAMES, value=MODELS_NAMES[0], label='model')\n", (5708, 5768), True, 'import gradio as gr\n'), ((5837, 5937), 'gradio.Slider', 'gr.Slider', ([], {'minimum': '(0.0)', 'maximum': '(1.0)', 'value': '(0.7)', 'step': '(0.1)', 'label': '"""temperature"""', 'interactive': '(True)'}), "(minimum=0.0, maximum=1.0, value=0.7, step=0.1, label=\n 'temperature', interactive=True)\n", (5846, 5937), True, 'import gradio as gr\n'), ((6112, 6130), 'gradio.Button', 'gr.Button', (['"""Apply"""'], {}), "('Apply')\n", (6121, 6130), True, 'import gradio as gr\n'), ((1440, 1460), 'callback.QueueCallback', 'QueueCallback', (['queue'], {}), '(queue)\n', (1453, 1460), False, 'from callback import QueueCallback\n'), ((3265, 3272), 'queue.Queue', 'Queue', ([], {}), '()\n', (3270, 3272), False, 'from queue import Empty, Queue\n')]
import logging from langchain.chains import RetrievalQA from neogpt.prompts.prompt import get_prompt def local_retriever(db, llm, persona="default"): """ Fn: local_retriever Description: The function sets up the local retrieval-based question-answering system. Args: db (object): The database object llm (object): The LLM model object return: chain (object): The chain object """ try: prompt, memory = get_prompt(persona=persona) # Create a retriever object local_retriever = db.as_retriever() chain = RetrievalQA.from_chain_type( llm=llm, retriever=local_retriever, chain_type="stuff", chain_type_kwargs={"prompt": prompt, "memory": memory}, return_source_documents=True, ) logging.info("Loaded Local Retriever Successfully 🔍") except Exception as e: logging.info(f"Error {e}") return chain
[ "langchain.chains.RetrievalQA.from_chain_type" ]
[((466, 493), 'neogpt.prompts.prompt.get_prompt', 'get_prompt', ([], {'persona': 'persona'}), '(persona=persona)\n', (476, 493), False, 'from neogpt.prompts.prompt import get_prompt\n'), ((590, 768), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'llm', 'retriever': 'local_retriever', 'chain_type': '"""stuff"""', 'chain_type_kwargs': "{'prompt': prompt, 'memory': memory}", 'return_source_documents': '(True)'}), "(llm=llm, retriever=local_retriever, chain_type=\n 'stuff', chain_type_kwargs={'prompt': prompt, 'memory': memory},\n return_source_documents=True)\n", (617, 768), False, 'from langchain.chains import RetrievalQA\n'), ((839, 892), 'logging.info', 'logging.info', (['"""Loaded Local Retriever Successfully 🔍"""'], {}), "('Loaded Local Retriever Successfully 🔍')\n", (851, 892), False, 'import logging\n'), ((928, 954), 'logging.info', 'logging.info', (['f"""Error {e}"""'], {}), "(f'Error {e}')\n", (940, 954), False, 'import logging\n')]
from langchain import PromptTemplate PROMPT = """ 你需要扮演一个优秀的关键信息提取助手,从人类的对话中提取关键性内容(最多5个关键词),以协助其他助手更精准地回答问题。 注意:你不需要做任何解释说明,只需严格按照示例的格式输出关键词。 示例: 人类:我有一个服装厂,是否可以应用你们的装箱算法改善装载率呢? AI: 服装厂, 装箱算法, 装载率 现在开始: 人类:{query} AI: """ def information_extraction_raw_prompt(): return PromptTemplate(template=PROMPT, input_variables=["query"]) def information_extraction_prompt(query: str): P = PromptTemplate(template=PROMPT, input_variables=["query"]) return P.format(query=query) if __name__ == "__main__": print(information_extraction_prompt("你们的装箱算法能不能用在家居业呀?"))
[ "langchain.PromptTemplate" ]
[((281, 339), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'PROMPT', 'input_variables': "['query']"}), "(template=PROMPT, input_variables=['query'])\n", (295, 339), False, 'from langchain import PromptTemplate\n'), ((397, 455), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'PROMPT', 'input_variables': "['query']"}), "(template=PROMPT, input_variables=['query'])\n", (411, 455), False, 'from langchain import PromptTemplate\n')]
import base64 import email from enum import Enum from typing import Any, Dict, List, Optional, Type from langchain.callbacks.manager import CallbackManagerForToolRun from langchain.pydantic_v1 import BaseModel, Field from langchain.tools.gmail.base import GmailBaseTool from langchain.tools.gmail.utils import clean_email_body class Resource(str, Enum): """Enumerator of Resources to search.""" THREADS = "threads" MESSAGES = "messages" class SearchArgsSchema(BaseModel): """Input for SearchGmailTool.""" # From https://support.google.com/mail/answer/7190?hl=en query: str = Field( ..., description="The Gmail query. Example filters include from:sender," " to:recipient, subject:subject, -filtered_term," " in:folder, is:important|read|starred, after:year/mo/date, " "before:year/mo/date, label:label_name" ' "exact phrase".' " Search newer/older than using d (day), m (month), and y (year): " "newer_than:2d, older_than:1y." " Attachments with extension example: filename:pdf. Multiple term" " matching example: from:amy OR from:david.", ) resource: Resource = Field( default=Resource.MESSAGES, description="Whether to search for threads or messages.", ) max_results: int = Field( default=10, description="The maximum number of results to return.", ) class GmailSearch(GmailBaseTool): """Tool that searches for messages or threads in Gmail.""" name: str = "search_gmail" description: str = ( "Use this tool to search for email messages or threads." " The input must be a valid Gmail query." " The output is a JSON list of the requested resource." 
) args_schema: Type[SearchArgsSchema] = SearchArgsSchema def _parse_threads(self, threads: List[Dict[str, Any]]) -> List[Dict[str, Any]]: # Add the thread message snippets to the thread results results = [] for thread in threads: thread_id = thread["id"] thread_data = ( self.api_resource.users() .threads() .get(userId="me", id=thread_id) .execute() ) messages = thread_data["messages"] thread["messages"] = [] for message in messages: snippet = message["snippet"] thread["messages"].append({"snippet": snippet, "id": message["id"]}) results.append(thread) return results def _parse_messages(self, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]: results = [] for message in messages: message_id = message["id"] message_data = ( self.api_resource.users() .messages() .get(userId="me", format="raw", id=message_id) .execute() ) raw_message = base64.urlsafe_b64decode(message_data["raw"]) email_msg = email.message_from_bytes(raw_message) subject = email_msg["Subject"] sender = email_msg["From"] message_body = email_msg.get_payload() body = clean_email_body(message_body) results.append( { "id": message["id"], "threadId": message_data["threadId"], "snippet": message_data["snippet"], "body": body, "subject": subject, "sender": sender, } ) return results def _run( self, query: str, resource: Resource = Resource.MESSAGES, max_results: int = 10, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> List[Dict[str, Any]]: """Run the tool.""" results = ( self.api_resource.users() .messages() .list(userId="me", q=query, maxResults=max_results) .execute() .get(resource.value, []) ) if resource == Resource.THREADS: return self._parse_threads(results) elif resource == Resource.MESSAGES: return self._parse_messages(results) else: raise NotImplementedError(f"Resource of type {resource} not implemented.")
[ "langchain.tools.gmail.utils.clean_email_body", "langchain.pydantic_v1.Field" ]
[((606, 1054), 'langchain.pydantic_v1.Field', 'Field', (['...'], {'description': '"""The Gmail query. Example filters include from:sender, to:recipient, subject:subject, -filtered_term, in:folder, is:important|read|starred, after:year/mo/date, before:year/mo/date, label:label_name "exact phrase". Search newer/older than using d (day), m (month), and y (year): newer_than:2d, older_than:1y. Attachments with extension example: filename:pdf. Multiple term matching example: from:amy OR from:david."""'}), '(..., description=\n \'The Gmail query. Example filters include from:sender, to:recipient, subject:subject, -filtered_term, in:folder, is:important|read|starred, after:year/mo/date, before:year/mo/date, label:label_name "exact phrase". Search newer/older than using d (day), m (month), and y (year): newer_than:2d, older_than:1y. Attachments with extension example: filename:pdf. Multiple term matching example: from:amy OR from:david.\'\n )\n', (611, 1054), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((1181, 1276), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': 'Resource.MESSAGES', 'description': '"""Whether to search for threads or messages."""'}), "(default=Resource.MESSAGES, description=\n 'Whether to search for threads or messages.')\n", (1186, 1276), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((1318, 1391), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': '(10)', 'description': '"""The maximum number of results to return."""'}), "(default=10, description='The maximum number of results to return.')\n", (1323, 1391), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((2960, 3005), 'base64.urlsafe_b64decode', 'base64.urlsafe_b64decode', (["message_data['raw']"], {}), "(message_data['raw'])\n", (2984, 3005), False, 'import base64\n'), ((3031, 3068), 'email.message_from_bytes', 'email.message_from_bytes', (['raw_message'], {}), '(raw_message)\n', (3055, 3068), False, 'import email\n'), ((3224, 3254), 
'langchain.tools.gmail.utils.clean_email_body', 'clean_email_body', (['message_body'], {}), '(message_body)\n', (3240, 3254), False, 'from langchain.tools.gmail.utils import clean_email_body\n')]
from langchain import PromptTemplate from codedog.templates import grimoire_en TRANSLATE_PROMPT = PromptTemplate( template=grimoire_en.TRANSLATE_PR_REVIEW, input_variables=["language", "description", "content"] )
[ "langchain.PromptTemplate" ]
[((100, 217), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'grimoire_en.TRANSLATE_PR_REVIEW', 'input_variables': "['language', 'description', 'content']"}), "(template=grimoire_en.TRANSLATE_PR_REVIEW, input_variables=[\n 'language', 'description', 'content'])\n", (114, 217), False, 'from langchain import PromptTemplate\n')]
# Importing necessary library import streamlit as st # Setting up the page configuration st.set_page_config( page_title="QuickDigest AI", page_icon=":brain:", layout="wide", initial_sidebar_state="expanded" ) # Defining the function to display the home page def home(): import streamlit as st from streamlit_extras.badges import badge from streamlit_extras.colored_header import colored_header from streamlit_extras.let_it_rain import rain # Displaying a rain animation with specified parameters rain( emoji="🎈", font_size=54, falling_speed=5, animation_length="1", ) # Displaying a colored header with specified parameters colored_header( label="QuickDigest AI🧠, Your Intelligent Data Companion", description="~ Powered by OpenAI, Llamaindex, AssemblyAI, Langchain, Replicate, Clipdrop", color_name="violet-70", ) # Displaying information and warnings in the sidebar st.sidebar.info( "Visit [OpenAI Pricing](https://openai.com/pricing#language-models) to get an overview of costs incurring depending upon the model chosen." ) st.sidebar.info( "For key & data privacy concerns, We do not store your Key, it will be removed after your session ends. Also OpenAI will not use data submitted by customers via our API to train or improve our models, unless you explicitly decide to share your data with us for this purpose, For more info please visit [OpenAI FAQs](https://help.openai.com/en/articles/7039943-data-usage-for-consumer-services-faq)." ) st.sidebar.warning( "LLMs may produce inaccurate information about people, places, or facts. Don't entirely trust them." ) # Displaying markdown text on the page st.markdown( "<h6>Discover a new horizon of data interaction with QuickDigest AI, your intelligent companion in navigating through diverse data formats. 
QuickDigest AI is meticulously crafted to simplify and enrich your engagement with data, ensuring a seamless flow of insights right at your fingertips.</h6>", unsafe_allow_html=True ) st.markdown( "**Effortless Data Extraction and Interaction:** QuickDigest AI stands as a beacon of innovation, allowing users to upload and interact with a variety of file formats including PDFs, Word documents, text files, and even audio/video files. The platform's cutting-edge technology ensures a smooth extraction of data, paving the way for meaningful conversations with the information gleaned from these files." ) st.markdown( "**Engage with your Datasets:** Dive into datasets like never before. QuickDigest AI invites you to upload your dataset and engage in a dialogue with it. Our advanced AI algorithms facilitate a conversational interaction with your dataset, making the extraction of insights an intuitive and enriching experience." ) st.markdown( "**Real-Time Web Search:** One of the limitations of large language models is there limited knowledge. QuickDigest AI's real-time web search feature ensures you're always ahead with the latest information. Be it market trends, news updates, or the newest research findings, QuickDigest AI brings the world to you in real-time." ) st.markdown( "**Ignite Your Creative Spark:** For product creators, QuickDigest AI unveils a realm of possibilities. Bored of simple product images, The Product Advertising Image Creator is your tool to craft captivating advertising images that resonate with your audience. Additionally, the Image Generator feature is your canvas to bring your creative visions to life, creating visually appealing images that speak volumes." 
) st.markdown("---") # Displaying a support section with badges and link button st.markdown("<h5>Support Us</h5>", unsafe_allow_html=True) col1, col2, col3, col4 = st.columns(4) with col1: st.write("Star this repository on Github") badge(type="github", name="codingis4noobs2/QuickDigest") with col2: st.write("Follow me on twitter") badge(type="twitter", name="4gameparth") with col3: st.write("Buy me a coffee") badge(type="buymeacoffee", name="codingis4noobs2") with col4: st.link_button("Upvote on Replit", "https://replit.com/@ParthShah38/QuickDigestAI?v=1") # Function to display chat with files page def chat_with_files(): import os import streamlit as st from streamlit_extras.badges import badge from streamlit_extras.colored_header import colored_header from llama_index import ( OpenAIEmbedding, ServiceContext, set_global_service_context, ) from llama_index.llms import OpenAI from llama_index.chat_engine.types import StreamingAgentChatResponse from llama_index import SimpleDirectoryReader, VectorStoreIndex import assemblyai as aai from PyPDF2 import PdfReader from docx import Document # Cache the result to avoid recomputation @st.cache_resource(show_spinner="Indexing documents...Please have patience") def build_index(files): documents = SimpleDirectoryReader(input_files=files).load_data() index = VectorStoreIndex.from_documents(documents) return index # Handle streaming responses def handle_stream(root, stream: StreamingAgentChatResponse): text = "" root.markdown("Thinking...") for token in stream.response_gen: text += token root.markdown(text) return text # Define constants and settings CACHE_DIR = "./uploads" aai.settings.api_key = st.secrets['assembly_api_key'] # Render chat messages def render_message(message): with st.chat_message(message["role"]): st.write(message["text"]) # Transcribe audio and video files def transcribe_audio_video(file_path): transcriber = aai.Transcriber() transcript = transcriber.transcribe(file_path) transcript_path = file_path + ".txt" 
with open(transcript_path, "w") as f: f.write(transcript.text) return transcript_path # Upload files and cache them def upload_files(types=["pdf", "txt", "mp3", "mp4", 'mpeg', 'doc', 'docx'], **kwargs): files = st.file_uploader( label=f"Upload files", type=types, **kwargs ) if not files: st.info(f"Please add documents, Note: Scanned documents are not supported yet!") st.stop() return cache_files(files, types=types) # Cache uploaded files def cache_files(files, types=["pdf", "txt", "mp3", "mp4", 'mpeg', 'doc', 'docx']) -> list[str]: filepaths = [] for file in files: # Determine the file extension from the mime type ext = file.type.split("/")[-1] if ext == "plain": # Handle text/plain mime type ext = "txt" elif ext in ["vnd.openxmlformats-officedocument.wordprocessingml.document", "vnd.ms-word"]: ext = "docx" # or "doc" depending on your needs if ext not in types: continue filepath = f"{CACHE_DIR}/{file.name}" with open(filepath, "wb") as f: f.write(file.getvalue()) if ext in ["mp3", "mp4"]: filepath = transcribe_audio_video(filepath) filepaths.append(filepath) # st.sidebar.write("Uploaded files", filepaths) # Debug statement with st.sidebar: with st.expander("Uploaded Files"): filepaths_pretty = "\n".join(f"- {filepath}" for filepath in filepaths) st.markdown(f"{filepaths_pretty}") return filepaths def transcribe_and_save(file_path): transcriber = aai.Transcriber() transcript = transcriber.transcribe(file_path) transcript_path = file_path + ".txt" with open(transcript_path, "w") as f: f.write(transcript.text) return transcript_path # Save extracted text to a txt file def save_extracted_text_to_txt(text, filename): txt_filename = os.path.splitext(filename)[0] + ".txt" txt_filepath = os.path.join('uploads', txt_filename) with open(txt_filepath, 'w', encoding='utf-8') as txt_file: txt_file.write(text) return txt_filepath # Get OpenAI API key from session state def get_key(): return st.session_state["openai_api_key"] # Read text from Word document def 
read_word_file(file_path): doc = Document(file_path) full_text = [] for para in doc.paragraphs: full_text.append(para.text) return '\n'.join(full_text) # Process uploaded documents def process_documents(documents): processed_docs = [] for doc in documents: if doc.endswith('.pdf'): processed_docs.append(process_pdf(doc)) elif doc.endswith(('.doc', '.docx')): text = read_word_file(doc) txt_filepath = save_extracted_text_to_txt(text, os.path.basename(doc)) processed_docs.append(txt_filepath) elif doc.endswith(('.mp3', '.mp4', '.mpeg')): processed_docs.append(transcribe_and_save(doc)) else: processed_docs.append(doc) return processed_docs # Process PDF files def process_pdf(pdf_path): reader = PdfReader(pdf_path) all_text = "" for page in reader.pages: extracted_text = page.extract_text() if extracted_text: processed_text = ' '.join(extracted_text.split('\n')) all_text += processed_text + "\n\n" txt_filepath = save_extracted_text_to_txt(all_text, os.path.basename(pdf_path)) os.remove(pdf_path) # Delete the original PDF file return txt_filepath # Main logic for handling OpenAI API key and document processing if "openai_api_key" not in st.session_state: openai_api_key = st.sidebar.text_input("OpenAI API Key", type="password") if not openai_api_key: st.sidebar.warning("Please add your OpenAI API key to continue!!") st.warning("Please add your OpenAI API key to continue!!") st.sidebar.info("To obtain your OpenAI API key, please visit [OpenAI](https://platform.openai.com/account/api-keys). They provide a $5 credit to allow you to experiment with their models. If you're unsure about how to get the API key, you can follow this [Tutorial](https://www.maisieai.com/help/how-to-get-an-openai-api-key-for-chatgpt). 
While obtaining the API key doesn't require a compulsory payment, once your allotted credit is exhausted, a payment will be necessary to continue using their services.") st.stop() st.session_state["openai_api_key"] = openai_api_key st.sidebar.text_input("Enter Youtube Video ID(Coming soon)", disabled=True) st.sidebar.text_input("Enter Spotify Podast link(Coming soon)", disabled=True) openai_api_key = get_key() if openai_api_key: st.toast('OpenAI API Key Added ✅') # Define service-context with st.sidebar: with st.expander("Advanced Settings"): st.session_state['temperature'] = st.number_input("Enter Temperature", help="It determines how creative the model should be", min_value=0.0,max_value=1.0, value=0.1) llm = OpenAI(temperature=st.session_state['temperature'], model='gpt-3.5-turbo', api_key=openai_api_key) embed_model = OpenAIEmbedding(api_key=openai_api_key) service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model) set_global_service_context(service_context) # Upload PDFs, DOCs, TXTs, MP3s, and MP4s documents = upload_files(types=["pdf", "txt", "mp3", "mp4", 'mpeg', 'doc', 'docx'], accept_multiple_files=True) # Process the uploaded documents processed_documents = process_documents(documents) if not processed_documents: st.warning("No documents uploaded!") st.stop() index = build_index(processed_documents) query_engine = index.as_chat_engine(chat_mode="condense_question", streaming=True) messages = st.session_state.get("messages", []) if not messages: messages.append({"role": "assistant", "text": "Hi!"}) for message in messages: render_message(message) if user_query := st.chat_input(): message = {"role": "user", "text": user_query} messages.append(message) render_message(message) with st.chat_message("assistant"): stream = query_engine.stream_chat(user_query) text = handle_stream(st.empty(), stream) message = {"role": "assistant", "text": text} messages.append(message) st.session_state.messages = messages # Function to use LLMs with web 
search def use_llms_with_web(): from langchain.agents import ConversationalChatAgent, AgentExecutor from langchain.callbacks import StreamlitCallbackHandler from langchain.chat_models import ChatOpenAI from langchain.memory import ConversationBufferMemory from langchain.memory.chat_message_histories import StreamlitChatMessageHistory from langchain.tools import DuckDuckGoSearchRun import streamlit as st st.title("Use web search with LLMs") # Taking OpenAI API key input from the user openai_api_key = st.sidebar.text_input("OpenAI API Key", type="password") # Initializing message history and memory msgs = StreamlitChatMessageHistory() memory = ConversationBufferMemory( chat_memory=msgs, return_messages=True, memory_key="chat_history", output_key="output" ) # Resetting chat history logic if len(msgs.messages) == 0 or st.sidebar.button("Reset chat history"): msgs.clear() msgs.add_ai_message("How can I help you?") st.session_state.steps = {} # Defining avatars for chat messages avatars = {"human": "user", "ai": "assistant"} for idx, msg in enumerate(msgs.messages): with st.chat_message(avatars[msg.type]): # Render intermediate steps if any were saved for step in st.session_state.steps.get(str(idx), []): if step[0].tool == "_Exception": continue with st.status(f"**{step[0].tool}**: {step[0].tool_input}", state="complete"): st.write(step[0].log) st.write(step[1]) st.write(msg.content) # Taking new input from the user if prompt := st.chat_input(placeholder="Who won the 2022 Cricket World Cup?"): st.chat_message("user").write(prompt) # Checking if OpenAI API key is provided if not openai_api_key: st.info("Please add your OpenAI API key to continue.") st.stop() # Initializing LLM and tools for web search llm = ChatOpenAI(model_name="gpt-3.5-turbo", openai_api_key=openai_api_key, streaming=True) tools = [DuckDuckGoSearchRun(name="Search")] chat_agent = ConversationalChatAgent.from_llm_and_tools(llm=llm, tools=tools) executor = AgentExecutor.from_agent_and_tools( 
agent=chat_agent, tools=tools, memory=memory, return_intermediate_steps=True, handle_parsing_errors=True, ) with st.chat_message("assistant"): st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False) response = executor(prompt, callbacks=[st_cb]) st.write(response["output"]) st.session_state.steps[str(len(msgs.messages) - 1)] = response["intermediate_steps"] # Function to display chat with dataset page def chat_with_dataset(): from langchain.agents import AgentType from langchain.agents import create_pandas_dataframe_agent from langchain.callbacks import StreamlitCallbackHandler from langchain.chat_models import ChatOpenAI import streamlit as st import pandas as pd import os file_formats = { "csv": pd.read_csv, "xls": pd.read_excel, "xlsx": pd.read_excel, "xlsm": pd.read_excel, "xlsb": pd.read_excel, } def clear_submit(): """ Clear the Submit Button State Returns: """ st.session_state["submit"] = False @st.cache_data() def load_data(uploaded_file): """ Load data from the uploaded file based on its extension. 
""" try: ext = os.path.splitext(uploaded_file.name)[1][1:].lower() except: ext = uploaded_file.split(".")[-1] if ext in file_formats: return file_formats[ext](uploaded_file) else: st.error(f"Unsupported file format: {ext}") return None st.title("Chat with your dataset") st.info("Asking one question at a time will result in a better output") uploaded_file = st.file_uploader( "Upload a Data file", type=list(file_formats.keys()), help="Various File formats are Support", on_change=clear_submit, ) df = None # Initialize df to None outside the if block if uploaded_file: df = load_data(uploaded_file) # df will be assigned a value if uploaded_file is truthy if df is None: # Check if df is still None before proceeding st.warning("No data file uploaded or there was an error in loading the data.") return # Exit the function early if df is None openai_api_key = st.sidebar.text_input("OpenAI API Key", type="password") st.sidebar.info("If you face a KeyError: 'content' error, Press the clear conversation histroy button") if "messages" not in st.session_state or st.sidebar.button("Clear conversation history"): st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}] # Display previous chat messages for msg in st.session_state.messages: st.chat_message(msg["role"]).write(msg["content"]) if prompt := st.chat_input(placeholder="What is this data about?"): st.session_state.messages.append({"role": "user", "content": prompt}) st.chat_message("user").write(prompt) # Check if OpenAI API key is provided if not openai_api_key: st.info("Please add your OpenAI API key to continue.") st.stop() llm = ChatOpenAI( temperature=0, model="gpt-3.5-turbo-0613", openai_api_key=openai_api_key, streaming=True ) pandas_df_agent = create_pandas_dataframe_agent( llm, df, verbose=True, agent_type=AgentType.OPENAI_FUNCTIONS, handle_parsing_errors=True, ) with st.chat_message("assistant"): st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False) response = 
pandas_df_agent.run(st.session_state.messages, callbacks=[st_cb]) st.session_state.messages.append({"role": "assistant", "content": response}) st.write(response) # Function to display transform products page def transform_products(): import streamlit as st import requests import os import replicate import io from PIL import Image st.session_state['replicate_api_token'] = st.sidebar.text_input("Replicate API Token", type='password') os.environ['REPLICATE_API_TOKEN'] = st.session_state['replicate_api_token'] if not st.session_state['replicate_api_token']: st.sidebar.warning('Please enter your Replicate API Token to continue!!') st.sidebar.info("You can get your Replicate API Token form here: [Replicate](https://replicate.com/account/api-tokens)") st.stop() if st.session_state['replicate_api_token']: st.info("This model works best with product images having transparent or plain backgrounds") # Prompt user to upload an image file img = st.file_uploader("Upload your product image", type=['png', 'jpg', 'jpeg']) if img is not None: has_plain_background = st.toggle("Does your product image have a plain or transparent background? 
If not, let us do the hard work for you!") prompt = st.text_input("Enter Prompt", help="Enter something you imagine...") negative_prompt = st.text_input("Enter Negative Prompt", help="Write what you don't want in the generated images") submit = st.button("Submit") if submit: if has_plain_background: # If image already has a plain background, prepare it for Replicate image = Image.open(img) bytes_obj = io.BytesIO() image.save(bytes_obj, format='PNG') bytes_obj.seek(0) else: # If image does not have a plain background, send it to ClipDrop to remove background image_file_object = img.read() r = requests.post('https://clipdrop-api.co/remove-background/v1', files={ 'image_file': ('uploaded_image.jpg', image_file_object, 'image/jpeg') }, headers={'x-api-key': st.secrets['clipdrop_api_key']} ) if r.ok: # If background removal is successful, prepare image for Replicate image = Image.open(io.BytesIO(r.content)) bytes_obj = io.BytesIO() image.save(bytes_obj, format='PNG') bytes_obj.seek(0) else: r.raise_for_status() st.error('Failed to remove background. 
Try again.') st.stop() # Send image to Replicate for transformation output = replicate.run( "logerzhu/ad-inpaint:b1c17d148455c1fda435ababe9ab1e03bc0d917cc3cf4251916f22c45c83c7df", input={"image_path": bytes_obj, "prompt": prompt, "image_num": 4} ) col1, col2 = st.columns(2) with col1: st.image(output[1]) st.image(output[2]) with col2: st.image(output[3]) st.image(output[4]) # Function to generate images based on user input def generate_images(): import streamlit as st import replicate import os st.session_state['replicate_api_token'] = st.sidebar.text_input("Replicate API Token", type='password') os.environ['REPLICATE_API_TOKEN'] = st.session_state['replicate_api_token'] if not st.session_state['replicate_api_token']: st.sidebar.warning('Please enter your Replicate API Token to continue!!') st.sidebar.info("You can get your Replicate API Token form here: [Replicate](https://replicate.com/account/api-tokens)") st.stop() if st.session_state['replicate_api_token']: prompt = st.text_input( "Enter prompt", help="Write something you can imagine..." 
) negative_prompt = st.text_input( "Enter Negative prompt", help="Write what you don't want to see in the generated images" ) submit = st.button("Submit") if submit: output = replicate.run( "stability-ai/sdxl:8beff3369e81422112d93b89ca01426147de542cd4684c244b673b105188fe5f", input={ "prompt": prompt, "negative_prompt": negative_prompt, "num_outputs": 4 }, ) col1, col2 = st.columns(2) with col1: st.image(output[0]) st.image(output[2]) with col2: st.image(output[1]) st.image(output[3]) # Dictonary to store all functions as pages page_names_to_funcs = { "Home 🏠": home, "Chat with files 📁": chat_with_files, "Chat with dataset 📖": chat_with_dataset, "Use web search with LLMs 🌐": use_llms_with_web, "Generate Images 🖌️": generate_images, "Transform your products 🎨": transform_products, } # display page by dictionary demo_name = st.sidebar.selectbox("Choose a page to navigate to", page_names_to_funcs.keys()) page_names_to_funcs[demo_name]()
[ "langchain.agents.AgentExecutor.from_agent_and_tools", "langchain.chat_models.ChatOpenAI", "langchain.memory.chat_message_histories.StreamlitChatMessageHistory", "langchain.memory.ConversationBufferMemory", "langchain.agents.ConversationalChatAgent.from_llm_and_tools", "langchain.tools.DuckDuckGoSearchRun", "langchain.agents.create_pandas_dataframe_agent" ]
[((91, 213), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""QuickDigest AI"""', 'page_icon': '""":brain:"""', 'layout': '"""wide"""', 'initial_sidebar_state': '"""expanded"""'}), "(page_title='QuickDigest AI', page_icon=':brain:', layout\n ='wide', initial_sidebar_state='expanded')\n", (109, 213), True, 'import streamlit as st\n'), ((540, 608), 'streamlit_extras.let_it_rain.rain', 'rain', ([], {'emoji': '"""🎈"""', 'font_size': '(54)', 'falling_speed': '(5)', 'animation_length': '"""1"""'}), "(emoji='🎈', font_size=54, falling_speed=5, animation_length='1')\n", (544, 608), False, 'from streamlit_extras.let_it_rain import rain\n'), ((717, 918), 'streamlit_extras.colored_header.colored_header', 'colored_header', ([], {'label': '"""QuickDigest AI🧠, Your Intelligent Data Companion"""', 'description': '"""~ Powered by OpenAI, Llamaindex, AssemblyAI, Langchain, Replicate, Clipdrop"""', 'color_name': '"""violet-70"""'}), "(label='QuickDigest AI🧠, Your Intelligent Data Companion',\n description=\n '~ Powered by OpenAI, Llamaindex, AssemblyAI, Langchain, Replicate, Clipdrop'\n , color_name='violet-70')\n", (731, 918), False, 'from streamlit_extras.colored_header import colored_header\n'), ((998, 1164), 'streamlit.sidebar.info', 'st.sidebar.info', (['"""Visit [OpenAI Pricing](https://openai.com/pricing#language-models) to get an overview of costs incurring depending upon the model chosen."""'], {}), "(\n 'Visit [OpenAI Pricing](https://openai.com/pricing#language-models) to get an overview of costs incurring depending upon the model chosen.'\n )\n", (1013, 1164), True, 'import streamlit as st\n'), ((1173, 1599), 'streamlit.sidebar.info', 'st.sidebar.info', (['"""For key & data privacy concerns, We do not store your Key, it will be removed after your session ends. 
Also OpenAI will not use data submitted by customers via our API to train or improve our models, unless you explicitly decide to share your data with us for this purpose, For more info please visit [OpenAI FAQs](https://help.openai.com/en/articles/7039943-data-usage-for-consumer-services-faq)."""'], {}), "(\n 'For key & data privacy concerns, We do not store your Key, it will be removed after your session ends. Also OpenAI will not use data submitted by customers via our API to train or improve our models, unless you explicitly decide to share your data with us for this purpose, For more info please visit [OpenAI FAQs](https://help.openai.com/en/articles/7039943-data-usage-for-consumer-services-faq).'\n )\n", (1188, 1599), True, 'import streamlit as st\n'), ((1608, 1738), 'streamlit.sidebar.warning', 'st.sidebar.warning', (['"""LLMs may produce inaccurate information about people, places, or facts. Don\'t entirely trust them."""'], {}), '(\n "LLMs may produce inaccurate information about people, places, or facts. Don\'t entirely trust them."\n )\n', (1626, 1738), True, 'import streamlit as st\n'), ((1795, 2139), 'streamlit.markdown', 'st.markdown', (['"""<h6>Discover a new horizon of data interaction with QuickDigest AI, your intelligent companion in navigating through diverse data formats. QuickDigest AI is meticulously crafted to simplify and enrich your engagement with data, ensuring a seamless flow of insights right at your fingertips.</h6>"""'], {'unsafe_allow_html': '(True)'}), "(\n '<h6>Discover a new horizon of data interaction with QuickDigest AI, your intelligent companion in navigating through diverse data formats. 
QuickDigest AI is meticulously crafted to simplify and enrich your engagement with data, ensuring a seamless flow of insights right at your fingertips.</h6>'\n , unsafe_allow_html=True)\n", (1806, 2139), True, 'import streamlit as st\n'), ((2156, 2585), 'streamlit.markdown', 'st.markdown', (['"""**Effortless Data Extraction and Interaction:** QuickDigest AI stands as a beacon of innovation, allowing users to upload and interact with a variety of file formats including PDFs, Word documents, text files, and even audio/video files. The platform\'s cutting-edge technology ensures a smooth extraction of data, paving the way for meaningful conversations with the information gleaned from these files."""'], {}), '(\n "**Effortless Data Extraction and Interaction:** QuickDigest AI stands as a beacon of innovation, allowing users to upload and interact with a variety of file formats including PDFs, Word documents, text files, and even audio/video files. The platform\'s cutting-edge technology ensures a smooth extraction of data, paving the way for meaningful conversations with the information gleaned from these files."\n )\n', (2167, 2585), True, 'import streamlit as st\n'), ((2594, 2930), 'streamlit.markdown', 'st.markdown', (['"""**Engage with your Datasets:** Dive into datasets like never before. QuickDigest AI invites you to upload your dataset and engage in a dialogue with it. Our advanced AI algorithms facilitate a conversational interaction with your dataset, making the extraction of insights an intuitive and enriching experience."""'], {}), "(\n '**Engage with your Datasets:** Dive into datasets like never before. QuickDigest AI invites you to upload your dataset and engage in a dialogue with it. 
Our advanced AI algorithms facilitate a conversational interaction with your dataset, making the extraction of insights an intuitive and enriching experience.'\n )\n", (2605, 2930), True, 'import streamlit as st\n'), ((2939, 3289), 'streamlit.markdown', 'st.markdown', (['"""**Real-Time Web Search:** One of the limitations of large language models is there limited knowledge. QuickDigest AI\'s real-time web search feature ensures you\'re always ahead with the latest information. Be it market trends, news updates, or the newest research findings, QuickDigest AI brings the world to you in real-time."""'], {}), '(\n "**Real-Time Web Search:** One of the limitations of large language models is there limited knowledge. QuickDigest AI\'s real-time web search feature ensures you\'re always ahead with the latest information. Be it market trends, news updates, or the newest research findings, QuickDigest AI brings the world to you in real-time."\n )\n', (2950, 3289), True, 'import streamlit as st\n'), ((3298, 3734), 'streamlit.markdown', 'st.markdown', (['"""**Ignite Your Creative Spark:** For product creators, QuickDigest AI unveils a realm of possibilities. Bored of simple product images, The Product Advertising Image Creator is your tool to craft captivating advertising images that resonate with your audience. Additionally, the Image Generator feature is your canvas to bring your creative visions to life, creating visually appealing images that speak volumes."""'], {}), "(\n '**Ignite Your Creative Spark:** For product creators, QuickDigest AI unveils a realm of possibilities. Bored of simple product images, The Product Advertising Image Creator is your tool to craft captivating advertising images that resonate with your audience. 
Additionally, the Image Generator feature is your canvas to bring your creative visions to life, creating visually appealing images that speak volumes.'\n )\n", (3309, 3734), True, 'import streamlit as st\n'), ((3744, 3762), 'streamlit.markdown', 'st.markdown', (['"""---"""'], {}), "('---')\n", (3755, 3762), True, 'import streamlit as st\n'), ((3835, 3893), 'streamlit.markdown', 'st.markdown', (['"""<h5>Support Us</h5>"""'], {'unsafe_allow_html': '(True)'}), "('<h5>Support Us</h5>', unsafe_allow_html=True)\n", (3846, 3893), True, 'import streamlit as st\n'), ((3923, 3936), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (3933, 3936), True, 'import streamlit as st\n'), ((5058, 5133), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '"""Indexing documents...Please have patience"""'}), "(show_spinner='Indexing documents...Please have patience')\n", (5075, 5133), True, 'import streamlit as st\n'), ((10968, 11043), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Enter Youtube Video ID(Coming soon)"""'], {'disabled': '(True)'}), "('Enter Youtube Video ID(Coming soon)', disabled=True)\n", (10989, 11043), True, 'import streamlit as st\n'), ((11048, 11126), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Enter Spotify Podast link(Coming soon)"""'], {'disabled': '(True)'}), "('Enter Spotify Podast link(Coming soon)', disabled=True)\n", (11069, 11126), True, 'import streamlit as st\n'), ((13550, 13586), 'streamlit.title', 'st.title', (['"""Use web search with LLMs"""'], {}), "('Use web search with LLMs')\n", (13558, 13586), True, 'import streamlit as st\n'), ((13656, 13712), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""OpenAI API Key"""'], {'type': '"""password"""'}), "('OpenAI API Key', type='password')\n", (13677, 13712), True, 'import streamlit as st\n'), ((13770, 13799), 'langchain.memory.chat_message_histories.StreamlitChatMessageHistory', 'StreamlitChatMessageHistory', ([], {}), '()\n', 
(13797, 13799), False, 'from langchain.memory.chat_message_histories import StreamlitChatMessageHistory\n'), ((13813, 13930), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'chat_memory': 'msgs', 'return_messages': '(True)', 'memory_key': '"""chat_history"""', 'output_key': '"""output"""'}), "(chat_memory=msgs, return_messages=True, memory_key\n ='chat_history', output_key='output')\n", (13837, 13930), False, 'from langchain.memory import ConversationBufferMemory\n'), ((16632, 16647), 'streamlit.cache_data', 'st.cache_data', ([], {}), '()\n', (16645, 16647), True, 'import streamlit as st\n'), ((17100, 17134), 'streamlit.title', 'st.title', (['"""Chat with your dataset"""'], {}), "('Chat with your dataset')\n", (17108, 17134), True, 'import streamlit as st\n'), ((17139, 17210), 'streamlit.info', 'st.info', (['"""Asking one question at a time will result in a better output"""'], {}), "('Asking one question at a time will result in a better output')\n", (17146, 17210), True, 'import streamlit as st\n'), ((17819, 17875), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""OpenAI API Key"""'], {'type': '"""password"""'}), "('OpenAI API Key', type='password')\n", (17840, 17875), True, 'import streamlit as st\n'), ((17885, 17998), 'streamlit.sidebar.info', 'st.sidebar.info', (['"""If you face a KeyError: \'content\' error, Press the clear conversation histroy button"""'], {}), '(\n "If you face a KeyError: \'content\' error, Press the clear conversation histroy button"\n )\n', (17900, 17998), True, 'import streamlit as st\n'), ((19630, 19691), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Replicate API Token"""'], {'type': '"""password"""'}), "('Replicate API Token', type='password')\n", (19651, 19691), True, 'import streamlit as st\n'), ((22919, 22980), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Replicate API Token"""'], {'type': '"""password"""'}), "('Replicate API Token', 
type='password')\n", (22940, 22980), True, 'import streamlit as st\n'), ((3960, 4002), 'streamlit.write', 'st.write', (['"""Star this repository on Github"""'], {}), "('Star this repository on Github')\n", (3968, 4002), True, 'import streamlit as st\n'), ((4011, 4067), 'streamlit_extras.badges.badge', 'badge', ([], {'type': '"""github"""', 'name': '"""codingis4noobs2/QuickDigest"""'}), "(type='github', name='codingis4noobs2/QuickDigest')\n", (4016, 4067), False, 'from streamlit_extras.badges import badge\n'), ((4091, 4123), 'streamlit.write', 'st.write', (['"""Follow me on twitter"""'], {}), "('Follow me on twitter')\n", (4099, 4123), True, 'import streamlit as st\n'), ((4132, 4172), 'streamlit_extras.badges.badge', 'badge', ([], {'type': '"""twitter"""', 'name': '"""4gameparth"""'}), "(type='twitter', name='4gameparth')\n", (4137, 4172), False, 'from streamlit_extras.badges import badge\n'), ((4196, 4223), 'streamlit.write', 'st.write', (['"""Buy me a coffee"""'], {}), "('Buy me a coffee')\n", (4204, 4223), True, 'import streamlit as st\n'), ((4232, 4282), 'streamlit_extras.badges.badge', 'badge', ([], {'type': '"""buymeacoffee"""', 'name': '"""codingis4noobs2"""'}), "(type='buymeacoffee', name='codingis4noobs2')\n", (4237, 4282), False, 'from streamlit_extras.badges import badge\n'), ((4306, 4397), 'streamlit.link_button', 'st.link_button', (['"""Upvote on Replit"""', '"""https://replit.com/@ParthShah38/QuickDigestAI?v=1"""'], {}), "('Upvote on Replit',\n 'https://replit.com/@ParthShah38/QuickDigestAI?v=1')\n", (4320, 4397), True, 'import streamlit as st\n'), ((5251, 5293), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (5282, 5293), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex\n'), ((5963, 5980), 'assemblyai.Transcriber', 'aai.Transcriber', ([], {}), '()\n', (5978, 5980), True, 'import assemblyai as aai\n'), ((6337, 6398), 'streamlit.file_uploader', 
'st.file_uploader', ([], {'label': 'f"""Upload files"""', 'type': 'types'}), "(label=f'Upload files', type=types, **kwargs)\n", (6353, 6398), True, 'import streamlit as st\n'), ((7852, 7869), 'assemblyai.Transcriber', 'aai.Transcriber', ([], {}), '()\n', (7867, 7869), True, 'import assemblyai as aai\n'), ((8262, 8299), 'os.path.join', 'os.path.join', (['"""uploads"""', 'txt_filename'], {}), "('uploads', txt_filename)\n", (8274, 8299), False, 'import os\n'), ((8628, 8647), 'docx.Document', 'Document', (['file_path'], {}), '(file_path)\n', (8636, 8647), False, 'from docx import Document\n'), ((9524, 9543), 'PyPDF2.PdfReader', 'PdfReader', (['pdf_path'], {}), '(pdf_path)\n', (9533, 9543), False, 'from PyPDF2 import PdfReader\n'), ((9898, 9917), 'os.remove', 'os.remove', (['pdf_path'], {}), '(pdf_path)\n', (9907, 9917), False, 'import os\n'), ((10122, 10178), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""OpenAI API Key"""'], {'type': '"""password"""'}), "('OpenAI API Key', type='password')\n", (10143, 10178), True, 'import streamlit as st\n'), ((11195, 11229), 'streamlit.toast', 'st.toast', (['"""OpenAI API Key Added ✅"""'], {}), "('OpenAI API Key Added ✅')\n", (11203, 11229), True, 'import streamlit as st\n'), ((11535, 11637), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': "st.session_state['temperature']", 'model': '"""gpt-3.5-turbo"""', 'api_key': 'openai_api_key'}), "(temperature=st.session_state['temperature'], model='gpt-3.5-turbo',\n api_key=openai_api_key)\n", (11541, 11637), False, 'from llama_index.llms import OpenAI\n'), ((11656, 11695), 'llama_index.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'api_key': 'openai_api_key'}), '(api_key=openai_api_key)\n', (11671, 11695), False, 'from llama_index import OpenAIEmbedding, ServiceContext, set_global_service_context\n'), ((11722, 11784), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model'}), '(llm=llm, 
embed_model=embed_model)\n', (11750, 11784), False, 'from llama_index import OpenAIEmbedding, ServiceContext, set_global_service_context\n'), ((11793, 11836), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (11819, 11836), False, 'from llama_index import OpenAIEmbedding, ServiceContext, set_global_service_context\n'), ((12378, 12414), 'streamlit.session_state.get', 'st.session_state.get', (['"""messages"""', '[]'], {}), "('messages', [])\n", (12398, 12414), True, 'import streamlit as st\n'), ((14009, 14048), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Reset chat history"""'], {}), "('Reset chat history')\n", (14026, 14048), True, 'import streamlit as st\n'), ((14812, 14876), 'streamlit.chat_input', 'st.chat_input', ([], {'placeholder': '"""Who won the 2022 Cricket World Cup?"""'}), "(placeholder='Who won the 2022 Cricket World Cup?')\n", (14825, 14876), True, 'import streamlit as st\n'), ((15159, 15248), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'openai_api_key': 'openai_api_key', 'streaming': '(True)'}), "(model_name='gpt-3.5-turbo', openai_api_key=openai_api_key,\n streaming=True)\n", (15169, 15248), False, 'from langchain.chat_models import ChatOpenAI\n'), ((15319, 15383), 'langchain.agents.ConversationalChatAgent.from_llm_and_tools', 'ConversationalChatAgent.from_llm_and_tools', ([], {'llm': 'llm', 'tools': 'tools'}), '(llm=llm, tools=tools)\n', (15361, 15383), False, 'from langchain.agents import ConversationalChatAgent, AgentExecutor\n'), ((15404, 15549), 'langchain.agents.AgentExecutor.from_agent_and_tools', 'AgentExecutor.from_agent_and_tools', ([], {'agent': 'chat_agent', 'tools': 'tools', 'memory': 'memory', 'return_intermediate_steps': '(True)', 'handle_parsing_errors': '(True)'}), '(agent=chat_agent, tools=tools, memory=\n memory, return_intermediate_steps=True, handle_parsing_errors=True)\n', (15438, 15549), False, 
'from langchain.agents import ConversationalChatAgent, AgentExecutor\n'), ((17662, 17740), 'streamlit.warning', 'st.warning', (['"""No data file uploaded or there was an error in loading the data."""'], {}), "('No data file uploaded or there was an error in loading the data.')\n", (17672, 17740), True, 'import streamlit as st\n'), ((18034, 18081), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Clear conversation history"""'], {}), "('Clear conversation history')\n", (18051, 18081), True, 'import streamlit as st\n'), ((18341, 18394), 'streamlit.chat_input', 'st.chat_input', ([], {'placeholder': '"""What is this data about?"""'}), "(placeholder='What is this data about?')\n", (18354, 18394), True, 'import streamlit as st\n'), ((18404, 18473), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (18436, 18473), True, 'import streamlit as st\n'), ((18701, 18806), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model': '"""gpt-3.5-turbo-0613"""', 'openai_api_key': 'openai_api_key', 'streaming': '(True)'}), "(temperature=0, model='gpt-3.5-turbo-0613', openai_api_key=\n openai_api_key, streaming=True)\n", (18711, 18806), False, 'from langchain.chat_models import ChatOpenAI\n'), ((18851, 18975), 'langchain.agents.create_pandas_dataframe_agent', 'create_pandas_dataframe_agent', (['llm', 'df'], {'verbose': '(True)', 'agent_type': 'AgentType.OPENAI_FUNCTIONS', 'handle_parsing_errors': '(True)'}), '(llm, df, verbose=True, agent_type=AgentType.\n OPENAI_FUNCTIONS, handle_parsing_errors=True)\n', (18880, 18975), False, 'from langchain.agents import create_pandas_dataframe_agent\n'), ((19833, 19906), 'streamlit.sidebar.warning', 'st.sidebar.warning', (['"""Please enter your Replicate API Token to continue!!"""'], {}), "('Please enter your Replicate API Token to continue!!')\n", (19851, 19906), True, 'import streamlit as st\n'), 
((19915, 20045), 'streamlit.sidebar.info', 'st.sidebar.info', (['"""You can get your Replicate API Token form here: [Replicate](https://replicate.com/account/api-tokens)"""'], {}), "(\n 'You can get your Replicate API Token form here: [Replicate](https://replicate.com/account/api-tokens)'\n )\n", (19930, 20045), True, 'import streamlit as st\n'), ((20044, 20053), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (20051, 20053), True, 'import streamlit as st\n'), ((20119, 20221), 'streamlit.info', 'st.info', (['"""This model works best with product images having transparent or plain backgrounds"""'], {}), "(\n 'This model works best with product images having transparent or plain backgrounds'\n )\n", (20126, 20221), True, 'import streamlit as st\n'), ((20272, 20346), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload your product image"""'], {'type': "['png', 'jpg', 'jpeg']"}), "('Upload your product image', type=['png', 'jpg', 'jpeg'])\n", (20288, 20346), True, 'import streamlit as st\n'), ((23122, 23195), 'streamlit.sidebar.warning', 'st.sidebar.warning', (['"""Please enter your Replicate API Token to continue!!"""'], {}), "('Please enter your Replicate API Token to continue!!')\n", (23140, 23195), True, 'import streamlit as st\n'), ((23204, 23334), 'streamlit.sidebar.info', 'st.sidebar.info', (['"""You can get your Replicate API Token form here: [Replicate](https://replicate.com/account/api-tokens)"""'], {}), "(\n 'You can get your Replicate API Token form here: [Replicate](https://replicate.com/account/api-tokens)'\n )\n", (23219, 23334), True, 'import streamlit as st\n'), ((23333, 23342), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (23340, 23342), True, 'import streamlit as st\n'), ((23409, 23481), 'streamlit.text_input', 'st.text_input', (['"""Enter prompt"""'], {'help': '"""Write something you can imagine..."""'}), "('Enter prompt', help='Write something you can imagine...')\n", (23422, 23481), True, 'import streamlit as st\n'), ((23543, 23651), 
'streamlit.text_input', 'st.text_input', (['"""Enter Negative prompt"""'], {'help': '"""Write what you don\'t want to see in the generated images"""'}), '(\'Enter Negative prompt\', help=\n "Write what you don\'t want to see in the generated images")\n', (23556, 23651), True, 'import streamlit as st\n'), ((23699, 23718), 'streamlit.button', 'st.button', (['"""Submit"""'], {}), "('Submit')\n", (23708, 23718), True, 'import streamlit as st\n'), ((5786, 5818), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (5801, 5818), True, 'import streamlit as st\n'), ((5832, 5857), 'streamlit.write', 'st.write', (["message['text']"], {}), "(message['text'])\n", (5840, 5857), True, 'import streamlit as st\n'), ((6455, 6540), 'streamlit.info', 'st.info', (['f"""Please add documents, Note: Scanned documents are not supported yet!"""'], {}), "(f'Please add documents, Note: Scanned documents are not supported yet!'\n )\n", (6462, 6540), True, 'import streamlit as st\n'), ((6548, 6557), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (6555, 6557), True, 'import streamlit as st\n'), ((9862, 9888), 'os.path.basename', 'os.path.basename', (['pdf_path'], {}), '(pdf_path)\n', (9878, 9888), False, 'import os\n'), ((10222, 10288), 'streamlit.sidebar.warning', 'st.sidebar.warning', (['"""Please add your OpenAI API key to continue!!"""'], {}), "('Please add your OpenAI API key to continue!!')\n", (10240, 10288), True, 'import streamlit as st\n'), ((10301, 10359), 'streamlit.warning', 'st.warning', (['"""Please add your OpenAI API key to continue!!"""'], {}), "('Please add your OpenAI API key to continue!!')\n", (10311, 10359), True, 'import streamlit as st\n'), ((10372, 10890), 'streamlit.sidebar.info', 'st.sidebar.info', (['"""To obtain your OpenAI API key, please visit [OpenAI](https://platform.openai.com/account/api-keys). They provide a $5 credit to allow you to experiment with their models. 
If you\'re unsure about how to get the API key, you can follow this [Tutorial](https://www.maisieai.com/help/how-to-get-an-openai-api-key-for-chatgpt). While obtaining the API key doesn\'t require a compulsory payment, once your allotted credit is exhausted, a payment will be necessary to continue using their services."""'], {}), '(\n "To obtain your OpenAI API key, please visit [OpenAI](https://platform.openai.com/account/api-keys). They provide a $5 credit to allow you to experiment with their models. If you\'re unsure about how to get the API key, you can follow this [Tutorial](https://www.maisieai.com/help/how-to-get-an-openai-api-key-for-chatgpt). While obtaining the API key doesn\'t require a compulsory payment, once your allotted credit is exhausted, a payment will be necessary to continue using their services."\n )\n', (10387, 10890), True, 'import streamlit as st\n'), ((10893, 10902), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (10900, 10902), True, 'import streamlit as st\n'), ((12158, 12194), 'streamlit.warning', 'st.warning', (['"""No documents uploaded!"""'], {}), "('No documents uploaded!')\n", (12168, 12194), True, 'import streamlit as st\n'), ((12207, 12216), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (12214, 12216), True, 'import streamlit as st\n'), ((12603, 12618), 'streamlit.chat_input', 'st.chat_input', ([], {}), '()\n', (12616, 12618), True, 'import streamlit as st\n'), ((14310, 14344), 'streamlit.chat_message', 'st.chat_message', (['avatars[msg.type]'], {}), '(avatars[msg.type])\n', (14325, 14344), True, 'import streamlit as st\n'), ((14735, 14756), 'streamlit.write', 'st.write', (['msg.content'], {}), '(msg.content)\n', (14743, 14756), True, 'import streamlit as st\n'), ((15016, 15070), 'streamlit.info', 'st.info', (['"""Please add your OpenAI API key to continue."""'], {}), "('Please add your OpenAI API key to continue.')\n", (15023, 15070), True, 'import streamlit as st\n'), ((15083, 15092), 'streamlit.stop', 'st.stop', ([], {}), 
'()\n', (15090, 15092), True, 'import streamlit as st\n'), ((15262, 15296), 'langchain.tools.DuckDuckGoSearchRun', 'DuckDuckGoSearchRun', ([], {'name': '"""Search"""'}), "(name='Search')\n", (15281, 15296), False, 'from langchain.tools import DuckDuckGoSearchRun\n'), ((15630, 15658), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (15645, 15658), True, 'import streamlit as st\n'), ((15819, 15847), 'streamlit.write', 'st.write', (["response['output']"], {}), "(response['output'])\n", (15827, 15847), True, 'import streamlit as st\n'), ((17027, 17070), 'streamlit.error', 'st.error', (['f"""Unsupported file format: {ext}"""'], {}), "(f'Unsupported file format: {ext}')\n", (17035, 17070), True, 'import streamlit as st\n'), ((18609, 18663), 'streamlit.info', 'st.info', (['"""Please add your OpenAI API key to continue."""'], {}), "('Please add your OpenAI API key to continue.')\n", (18616, 18663), True, 'import streamlit as st\n'), ((18676, 18685), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (18683, 18685), True, 'import streamlit as st\n'), ((19056, 19084), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (19071, 19084), True, 'import streamlit as st\n'), ((19275, 19351), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': response}"], {}), "({'role': 'assistant', 'content': response})\n", (19307, 19351), True, 'import streamlit as st\n'), ((19364, 19382), 'streamlit.write', 'st.write', (['response'], {}), '(response)\n', (19372, 19382), True, 'import streamlit as st\n'), ((20411, 20538), 'streamlit.toggle', 'st.toggle', (['"""Does your product image have a plain or transparent background? If not, let us do the hard work for you!"""'], {}), "(\n 'Does your product image have a plain or transparent background? 
If not, let us do the hard work for you!'\n )\n", (20420, 20538), True, 'import streamlit as st\n'), ((20550, 20618), 'streamlit.text_input', 'st.text_input', (['"""Enter Prompt"""'], {'help': '"""Enter something you imagine..."""'}), "('Enter Prompt', help='Enter something you imagine...')\n", (20563, 20618), True, 'import streamlit as st\n'), ((20649, 20750), 'streamlit.text_input', 'st.text_input', (['"""Enter Negative Prompt"""'], {'help': '"""Write what you don\'t want in the generated images"""'}), '(\'Enter Negative Prompt\', help=\n "Write what you don\'t want in the generated images")\n', (20662, 20750), True, 'import streamlit as st\n'), ((20767, 20786), 'streamlit.button', 'st.button', (['"""Submit"""'], {}), "('Submit')\n", (20776, 20786), True, 'import streamlit as st\n'), ((23760, 23953), 'replicate.run', 'replicate.run', (['"""stability-ai/sdxl:8beff3369e81422112d93b89ca01426147de542cd4684c244b673b105188fe5f"""'], {'input': "{'prompt': prompt, 'negative_prompt': negative_prompt, 'num_outputs': 4}"}), "(\n 'stability-ai/sdxl:8beff3369e81422112d93b89ca01426147de542cd4684c244b673b105188fe5f'\n , input={'prompt': prompt, 'negative_prompt': negative_prompt,\n 'num_outputs': 4})\n", (23773, 23953), False, 'import replicate\n'), ((24092, 24105), 'streamlit.columns', 'st.columns', (['(2)'], {}), '(2)\n', (24102, 24105), True, 'import streamlit as st\n'), ((5182, 5222), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': 'files'}), '(input_files=files)\n', (5203, 5222), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex\n'), ((7594, 7623), 'streamlit.expander', 'st.expander', (['"""Uploaded Files"""'], {}), "('Uploaded Files')\n", (7605, 7623), True, 'import streamlit as st\n'), ((7729, 7763), 'streamlit.markdown', 'st.markdown', (['f"""{filepaths_pretty}"""'], {}), "(f'{filepaths_pretty}')\n", (7740, 7763), True, 'import streamlit as st\n'), ((8200, 8226), 'os.path.splitext', 'os.path.splitext', 
(['filename'], {}), '(filename)\n', (8216, 8226), False, 'import os\n'), ((11305, 11337), 'streamlit.expander', 'st.expander', (['"""Advanced Settings"""'], {}), "('Advanced Settings')\n", (11316, 11337), True, 'import streamlit as st\n'), ((11389, 11530), 'streamlit.number_input', 'st.number_input', (['"""Enter Temperature"""'], {'help': '"""It determines how creative the model should be"""', 'min_value': '(0.0)', 'max_value': '(1.0)', 'value': '(0.1)'}), "('Enter Temperature', help=\n 'It determines how creative the model should be', min_value=0.0,\n max_value=1.0, value=0.1)\n", (11404, 11530), True, 'import streamlit as st\n'), ((12770, 12798), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (12785, 12798), True, 'import streamlit as st\n'), ((14886, 14909), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (14901, 14909), True, 'import streamlit as st\n'), ((15705, 15719), 'streamlit.container', 'st.container', ([], {}), '()\n', (15717, 15719), True, 'import streamlit as st\n'), ((18272, 18300), 'streamlit.chat_message', 'st.chat_message', (["msg['role']"], {}), "(msg['role'])\n", (18287, 18300), True, 'import streamlit as st\n'), ((18482, 18505), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (18497, 18505), True, 'import streamlit as st\n'), ((19131, 19145), 'streamlit.container', 'st.container', ([], {}), '()\n', (19143, 19145), True, 'import streamlit as st\n'), ((22251, 22429), 'replicate.run', 'replicate.run', (['"""logerzhu/ad-inpaint:b1c17d148455c1fda435ababe9ab1e03bc0d917cc3cf4251916f22c45c83c7df"""'], {'input': "{'image_path': bytes_obj, 'prompt': prompt, 'image_num': 4}"}), "(\n 'logerzhu/ad-inpaint:b1c17d148455c1fda435ababe9ab1e03bc0d917cc3cf4251916f22c45c83c7df'\n , input={'image_path': bytes_obj, 'prompt': prompt, 'image_num': 4})\n", (22264, 22429), False, 'import replicate\n'), ((22507, 22520), 'streamlit.columns', 'st.columns', (['(2)'], 
{}), '(2)\n', (22517, 22520), True, 'import streamlit as st\n'), ((24145, 24164), 'streamlit.image', 'st.image', (['output[0]'], {}), '(output[0])\n', (24153, 24164), True, 'import streamlit as st\n'), ((24181, 24200), 'streamlit.image', 'st.image', (['output[2]'], {}), '(output[2])\n', (24189, 24200), True, 'import streamlit as st\n'), ((24240, 24259), 'streamlit.image', 'st.image', (['output[1]'], {}), '(output[1])\n', (24248, 24259), True, 'import streamlit as st\n'), ((24276, 24295), 'streamlit.image', 'st.image', (['output[3]'], {}), '(output[3])\n', (24284, 24295), True, 'import streamlit as st\n'), ((12899, 12909), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (12907, 12909), True, 'import streamlit as st\n'), ((14569, 14641), 'streamlit.status', 'st.status', (['f"""**{step[0].tool}**: {step[0].tool_input}"""'], {'state': '"""complete"""'}), "(f'**{step[0].tool}**: {step[0].tool_input}', state='complete')\n", (14578, 14641), True, 'import streamlit as st\n'), ((14663, 14684), 'streamlit.write', 'st.write', (['step[0].log'], {}), '(step[0].log)\n', (14671, 14684), True, 'import streamlit as st\n'), ((14705, 14722), 'streamlit.write', 'st.write', (['step[1]'], {}), '(step[1])\n', (14713, 14722), True, 'import streamlit as st\n'), ((20968, 20983), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (20978, 20983), False, 'from PIL import Image\n'), ((21016, 21028), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (21026, 21028), False, 'import io\n'), ((21326, 21530), 'requests.post', 'requests.post', (['"""https://clipdrop-api.co/remove-background/v1"""'], {'files': "{'image_file': ('uploaded_image.jpg', image_file_object, 'image/jpeg')}", 'headers': "{'x-api-key': st.secrets['clipdrop_api_key']}"}), "('https://clipdrop-api.co/remove-background/v1', files={\n 'image_file': ('uploaded_image.jpg', image_file_object, 'image/jpeg')},\n headers={'x-api-key': st.secrets['clipdrop_api_key']})\n", (21339, 21530), False, 'import requests\n'), ((22568, 22587), 
'streamlit.image', 'st.image', (['output[1]'], {}), '(output[1])\n', (22576, 22587), True, 'import streamlit as st\n'), ((22608, 22627), 'streamlit.image', 'st.image', (['output[2]'], {}), '(output[2])\n', (22616, 22627), True, 'import streamlit as st\n'), ((22675, 22694), 'streamlit.image', 'st.image', (['output[3]'], {}), '(output[3])\n', (22683, 22694), True, 'import streamlit as st\n'), ((22715, 22734), 'streamlit.image', 'st.image', (['output[4]'], {}), '(output[4])\n', (22723, 22734), True, 'import streamlit as st\n'), ((9163, 9184), 'os.path.basename', 'os.path.basename', (['doc'], {}), '(doc)\n', (9179, 9184), False, 'import os\n'), ((21868, 21880), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (21878, 21880), False, 'import io\n'), ((22078, 22129), 'streamlit.error', 'st.error', (['"""Failed to remove background. Try again."""'], {}), "('Failed to remove background. Try again.')\n", (22086, 22129), True, 'import streamlit as st\n'), ((22154, 22163), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (22161, 22163), True, 'import streamlit as st\n'), ((16802, 16838), 'os.path.splitext', 'os.path.splitext', (['uploaded_file.name'], {}), '(uploaded_file.name)\n', (16818, 16838), False, 'import os\n'), ((21809, 21830), 'io.BytesIO', 'io.BytesIO', (['r.content'], {}), '(r.content)\n', (21819, 21830), False, 'import io\n')]
from time import monotonic from rich.console import Console from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.llms import OpenAI class Experiment: """ A class representing an experiment. Attributes: params (dict): A dictionary containing experiment parameters. index (int): An integer representing the index of the experiment. documents (list): A list of documents to be used in the experiment. vectorstore_list (list): A list of vector stores used in the experiment. retrievers_list (list): A list of retrievers used in the experiment. chain (object): An object representing the chain used in the experiment. query_results (dict): A dictionary containing the results of the queries. run_time (float): A float representing the time it took to run the experiment. embedding_time (float): A float representing the time it took to embed the documents. Methods: run(eval_queries): Runs the experiment with the given evaluation queries. _evaluate(chain, eval_queries): Evaluates the experiment with the given chain and evaluation queries. """ def __init__( self, params, index=0, documents=None, vectorstore_list=None, retrievers_list=None, chain=None, ): """ Initializes an Experiment object. Args: params (dict): A dictionary containing experiment parameters. index (int): An integer representing the index of the experiment. documents (list): A list of documents to be used in the experiment. vectorstore_list (list): A list of vector stores used in the experiment. retrievers_list (list): A list of retrievers used in the experiment. chain (object): An object representing the chain used in the experiment. 
""" self.params = params self.index = index self.console = Console() self.documents = documents self.vectorstore_list = vectorstore_list self.retrievers_list = retrievers_list self.chain = chain self.query_results = {} # time variables self.run_time = 0 self.embedding_time = 0 def run(self, eval_queries): """ Runs the experiment with the given evaluation queries. Args: eval_queries (list): A list of evaluation queries. Returns: A tuple containing the query results, the run time, and the embedding time. """ self.console.log(f"Experiment {self.index} started") self.start_time = monotonic() text_splitter = RecursiveCharacterTextSplitter( chunk_size=self.params["chunk_size"], chunk_overlap=0 ) texts = text_splitter.split_documents(self.documents) self.pre_embedding = monotonic() db = self.params["vector_store"].from_documents( texts, self.params["embeddings"] ) self.post_embedding = monotonic() self.vectorstore_list.append(db) retriever = db.as_retriever(search_kwargs={"k": 2}) self.retrievers_list.append(retriever) qa = self.chain.from_chain_type( llm=OpenAI(), chain_type="stuff", retriever=retriever ) self.pre_queries = monotonic() self._evaluate(qa, eval_queries) return self.query_results, self.run_time, self.embedding_time def _evaluate(self, chain, eval_queries): """ Evaluates the experiment with the given chain and evaluation queries. Args: chain (object): An object representing the chain used in the experiment. eval_queries (list): A list of evaluation queries. """ self.console.log(f"Evaluating experiment {self.index}") for q in eval_queries: res = chain.run(q) self.query_results[q] = res post_queries = monotonic() self.console.log(f"Finished experiment {self.index}") self.run_time = post_queries - self.start_time self.embedding_time = self.post_embedding - self.pre_embedding
[ "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.llms.OpenAI" ]
[((1970, 1979), 'rich.console.Console', 'Console', ([], {}), '()\n', (1977, 1979), False, 'from rich.console import Console\n'), ((2643, 2654), 'time.monotonic', 'monotonic', ([], {}), '()\n', (2652, 2654), False, 'from time import monotonic\n'), ((2680, 2769), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': "self.params['chunk_size']", 'chunk_overlap': '(0)'}), "(chunk_size=self.params['chunk_size'],\n chunk_overlap=0)\n", (2710, 2769), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2880, 2891), 'time.monotonic', 'monotonic', ([], {}), '()\n', (2889, 2891), False, 'from time import monotonic\n'), ((3034, 3045), 'time.monotonic', 'monotonic', ([], {}), '()\n', (3043, 3045), False, 'from time import monotonic\n'), ((3341, 3352), 'time.monotonic', 'monotonic', ([], {}), '()\n', (3350, 3352), False, 'from time import monotonic\n'), ((3964, 3975), 'time.monotonic', 'monotonic', ([], {}), '()\n', (3973, 3975), False, 'from time import monotonic\n'), ((3253, 3261), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (3259, 3261), False, 'from langchain.llms import OpenAI\n')]
import os os.environ["CUDA_VISIBLE_DEVICES"] = "2" import re import torch import gradio as gr from clc.langchain_application import LangChainApplication, torch_gc from transformers import StoppingCriteriaList, StoppingCriteriaList from clc.callbacks import Iteratorize, Stream from clc.matching import key_words_match_intention, key_words_match_knowledge from langchain.schema import Document # 调试使用 # os.chdir("../../../") class LangChainCFG: llm_model_name = 'luwen_baichuan/output/zju_model_0813_100k' # 本地模型文件 or huggingface远程仓库 embedding_model_name = 'app/langchain_demo/model/text2vec' # 检索模型文件 or huggingface远程仓库 vector_store_path = 'app/langchain_demo/data/cache/legal_articles' kg_vector_stores = { '法律法条': 'app/langchain_demo/data/cache/legal_articles', '法律书籍': 'app/langchain_demo/data/cache/legal_books', '法律文书模板':'app/langchain_demo/data/cache/legal_templates', '法律案例': 'app/langchain_demo/data/cache/legal_cases', '法律考试': 'app/langchain_demo/data/cache/judicialExamination', '日常法律问答': 'app/langchain_demo/data/cache/legal_QA', } config = LangChainCFG() application = LangChainApplication(config) def clear_session(): return '', None, "" def predict(input, kg_names=None, history=None, intention_reg=None, **kwargs): max_length=1024 top_k = 1 application.llm_service.max_token = max_length # print(input) if history == None: history = [] search_text = '' now_input = input eos_token_ids = [application.llm_service.tokenizer.eos_token_id] application.llm_service.history = history[-5:] max_memory = 4096 - max_length if intention_reg==["意图识别"]: auto_kg_names = key_words_match_intention(input) if len(auto_kg_names)==0: search_text += "意图识别没有匹配到知识库。\n\n" else: match_kg_names = "、".join(list(auto_kg_names)) search_text += "意图识别匹配到知识库是:"+match_kg_names+"。\n\n" kg_names = list(set(kg_names) | auto_kg_names) kb_based = True if len(kg_names) != 0 else False if len(history) != 0: input = "".join(["</s>Human:" + i[0] + " </s>Assistant: " + i[1] for i in application.llm_service.history]) 
+ \ "</s>Human:" + input input = input[len("</s>Human:"):] if len(input) > max_memory: input = input[-max_memory:] if kb_based: related_docs_with_score_seq = [] for kg_name in kg_names: if kg_name=="法律法条": related_article = key_words_match_knowledge(application.all_articles, application.choices, now_input) if related_article: kg_matches = [(Document(page_content=related_article[0], metadata={"value": related_article[1]}),0)] else: application.source_service.load_vector_store(application.config.kg_vector_stores[kg_name]) kg_matches = application.source_service.vector_store.similarity_search_with_score(input, k=top_k) else: application.source_service.load_vector_store(application.config.kg_vector_stores[kg_name]) kg_matches = application.source_service.vector_store.similarity_search_with_score(input, k=top_k) related_docs_with_score_seq.append(kg_matches) related_docs_with_score = related_docs_with_score_seq if len(related_docs_with_score) > 0: input, context_with_score = application.generate_prompt(related_docs_with_score, input,kg_names) search_text += context_with_score torch_gc() print("histroy in call: ", history) prompt = f'</s>Human:{input} </s>Assistant: ' print("prompt: ",prompt) inputs = application.llm_service.tokenizer(prompt, return_tensors="pt").to('cuda') stopping_criteria = StoppingCriteriaList() kwargs['inputs'] = inputs kwargs['max_new_tokens'] = max_length kwargs['repetition_penalty'] = float(1.2) kwargs['stopping_criteria'] = stopping_criteria history.append((now_input, "")) def generate_with_callback(callback=None, **kwargs): kwargs['stopping_criteria'].append(Stream(callback_func=callback)) with torch.no_grad(): application.llm_service.model.generate(**kwargs['inputs'], max_new_tokens=kwargs['max_new_tokens'], repetition_penalty=kwargs['repetition_penalty'], stopping_criteria=kwargs["stopping_criteria"]) def generate_with_streaming(**kwargs): return Iteratorize(generate_with_callback, kwargs, callback=None) with generate_with_streaming(**kwargs) 
as generator: for output in generator: last = output[-1] output = application.llm_service.tokenizer.decode(output, skip_special_tokens=True) pattern = r"\n{5,}$" pattern2 = r"\s{5,}$" origin_output = output output = output.split("Assistant:")[-1].strip() history[-1] = (now_input, output) yield "", history, history, search_text if last in eos_token_ids or re.search(pattern, origin_output) or re.search(pattern2, origin_output): break with gr.Blocks() as demo: state = gr.State() with gr.Row(): with gr.Column(scale=1.5): github_banner_path = 'https://raw.githubusercontent.com/LIANG-star177/chatgptapi/master/logo.png' gr.HTML(f'<p align="center"><a href="https://github.com/LIANG-star177/chatgptapi/blob/master/logo.png"><img src={github_banner_path} height="100" width="200"/></a></p>') with gr.Row(): intention_reg = gr.CheckboxGroup(["意图识别"], label="自动选择知识库", value=None, interactive=True) with gr.Row(): kg_names = gr.CheckboxGroup(list(config.kg_vector_stores.keys()), label="手动选择知识库", value=None, interactive=True).style(height=200) with gr.Row(): search = gr.Textbox(label='知识库检索结果') with gr.Row(): gr.Markdown("""Powered by 浙江大学 阿里巴巴达摩院 华院计算 魔搭社区""") with gr.Row(): gr.Markdown("""免责声明:本模型仅供学术研究之目的而提供,不保证结果的准确性、完整性或适用性。在使用模型生成的内容时,您应自行判断其适用性,并自担风险。""") with gr.Column(scale=4): with gr.Row(): chatbot = gr.Chatbot(label='智海-录问').style(height=500) with gr.Row(): message = gr.Textbox(label='请输入问题') with gr.Row(): clear_history = gr.Button("🧹 清除历史对话") send = gr.Button("🚀 发送") send.click(predict, inputs=[ message, kg_names, state, intention_reg, ], outputs=[message, chatbot, state, search], show_progress=True) clear_history.click(fn=clear_session, inputs=[], outputs=[chatbot, state, search], queue=False) message.submit(predict, inputs=[ message, kg_names, state, intention_reg, ], outputs=[message, chatbot, state, search], show_progress=True) demo.queue(concurrency_count=2).launch( server_name='0.0.0.0', server_port=7888, share=True, enable_queue=True, inbrowser=True, )
[ "langchain.schema.Document" ]
[((1155, 1183), 'clc.langchain_application.LangChainApplication', 'LangChainApplication', (['config'], {}), '(config)\n', (1175, 1183), False, 'from clc.langchain_application import LangChainApplication, torch_gc\n'), ((3620, 3630), 'clc.langchain_application.torch_gc', 'torch_gc', ([], {}), '()\n', (3628, 3630), False, 'from clc.langchain_application import LangChainApplication, torch_gc\n'), ((3863, 3885), 'transformers.StoppingCriteriaList', 'StoppingCriteriaList', ([], {}), '()\n', (3883, 3885), False, 'from transformers import StoppingCriteriaList, StoppingCriteriaList\n'), ((5358, 5369), 'gradio.Blocks', 'gr.Blocks', ([], {}), '()\n', (5367, 5369), True, 'import gradio as gr\n'), ((5392, 5402), 'gradio.State', 'gr.State', ([], {}), '()\n', (5400, 5402), True, 'import gradio as gr\n'), ((1763, 1795), 'clc.matching.key_words_match_intention', 'key_words_match_intention', (['input'], {}), '(input)\n', (1788, 1795), False, 'from clc.matching import key_words_match_intention, key_words_match_knowledge\n'), ((4681, 4739), 'clc.callbacks.Iteratorize', 'Iteratorize', (['generate_with_callback', 'kwargs'], {'callback': 'None'}), '(generate_with_callback, kwargs, callback=None)\n', (4692, 4739), False, 'from clc.callbacks import Iteratorize, Stream\n'), ((5412, 5420), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (5418, 5420), True, 'import gradio as gr\n'), ((4194, 4224), 'clc.callbacks.Stream', 'Stream', ([], {'callback_func': 'callback'}), '(callback_func=callback)\n', (4200, 4224), False, 'from clc.callbacks import Iteratorize, Stream\n'), ((4239, 4254), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4252, 4254), False, 'import torch\n'), ((5435, 5455), 'gradio.Column', 'gr.Column', ([], {'scale': '(1.5)'}), '(scale=1.5)\n', (5444, 5455), True, 'import gradio as gr\n'), ((5579, 5758), 'gradio.HTML', 'gr.HTML', (['f"""<p align="center"><a href="https://github.com/LIANG-star177/chatgptapi/blob/master/logo.png"><img src={github_banner_path} height="100" 
width="200"/></a></p>"""'], {}), '(\n f\'<p align="center"><a href="https://github.com/LIANG-star177/chatgptapi/blob/master/logo.png"><img src={github_banner_path} height="100" width="200"/></a></p>\'\n )\n', (5586, 5758), True, 'import gradio as gr\n'), ((6552, 6570), 'gradio.Column', 'gr.Column', ([], {'scale': '(4)'}), '(scale=4)\n', (6561, 6570), True, 'import gradio as gr\n'), ((2568, 2655), 'clc.matching.key_words_match_knowledge', 'key_words_match_knowledge', (['application.all_articles', 'application.choices', 'now_input'], {}), '(application.all_articles, application.choices,\n now_input)\n', (2593, 2655), False, 'from clc.matching import key_words_match_intention, key_words_match_knowledge\n'), ((5257, 5290), 're.search', 're.search', (['pattern', 'origin_output'], {}), '(pattern, origin_output)\n', (5266, 5290), False, 'import re\n'), ((5294, 5328), 're.search', 're.search', (['pattern2', 'origin_output'], {}), '(pattern2, origin_output)\n', (5303, 5328), False, 'import re\n'), ((5766, 5774), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (5772, 5774), True, 'import gradio as gr\n'), ((5815, 5888), 'gradio.CheckboxGroup', 'gr.CheckboxGroup', (["['意图识别']"], {'label': '"""自动选择知识库"""', 'value': 'None', 'interactive': '(True)'}), "(['意图识别'], label='自动选择知识库', value=None, interactive=True)\n", (5831, 5888), True, 'import gradio as gr\n'), ((5978, 5986), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (5984, 5986), True, 'import gradio as gr\n'), ((6248, 6256), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (6254, 6256), True, 'import gradio as gr\n'), ((6283, 6310), 'gradio.Textbox', 'gr.Textbox', ([], {'label': '"""知识库检索结果"""'}), "(label='知识库检索结果')\n", (6293, 6310), True, 'import gradio as gr\n'), ((6329, 6337), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (6335, 6337), True, 'import gradio as gr\n'), ((6355, 6403), 'gradio.Markdown', 'gr.Markdown', (['"""Powered by 浙江大学 阿里巴巴达摩院 华院计算 魔搭社区"""'], {}), "('Powered by 浙江大学 阿里巴巴达摩院 华院计算 魔搭社区')\n", (6366, 6403), True, 'import gradio 
as gr\n'), ((6425, 6433), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (6431, 6433), True, 'import gradio as gr\n'), ((6451, 6539), 'gradio.Markdown', 'gr.Markdown', (['"""免责声明:本模型仅供学术研究之目的而提供,不保证结果的准确性、完整性或适用性。在使用模型生成的内容时,您应自行判断其适用性,并自担风险。"""'], {}), "(\n '免责声明:本模型仅供学术研究之目的而提供,不保证结果的准确性、完整性或适用性。在使用模型生成的内容时,您应自行判断其适用性,并自担风险。')\n", (6462, 6539), True, 'import gradio as gr\n'), ((6589, 6597), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (6595, 6597), True, 'import gradio as gr\n'), ((6698, 6706), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (6704, 6706), True, 'import gradio as gr\n'), ((6734, 6759), 'gradio.Textbox', 'gr.Textbox', ([], {'label': '"""请输入问题"""'}), "(label='请输入问题')\n", (6744, 6759), True, 'import gradio as gr\n'), ((6789, 6797), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (6795, 6797), True, 'import gradio as gr\n'), ((6831, 6852), 'gradio.Button', 'gr.Button', (['"""🧹 清除历史对话"""'], {}), "('🧹 清除历史对话')\n", (6840, 6852), True, 'import gradio as gr\n'), ((6876, 6893), 'gradio.Button', 'gr.Button', (['"""🚀 发送"""'], {}), "('🚀 发送')\n", (6885, 6893), True, 'import gradio as gr\n'), ((6625, 6650), 'gradio.Chatbot', 'gr.Chatbot', ([], {'label': '"""智海-录问"""'}), "(label='智海-录问')\n", (6635, 6650), True, 'import gradio as gr\n'), ((2723, 2808), 'langchain.schema.Document', 'Document', ([], {'page_content': 'related_article[0]', 'metadata': "{'value': related_article[1]}"}), "(page_content=related_article[0], metadata={'value':\n related_article[1]})\n", (2731, 2808), False, 'from langchain.schema import Document\n')]
"""This script is used to initialize the Qdrant db backend with Azure OpenAI.""" import os from typing import Any, List, Optional, Tuple import openai from dotenv import load_dotenv from langchain.docstore.document import Document from langchain.text_splitter import NLTKTextSplitter from langchain_community.document_loaders import DirectoryLoader, PyPDFium2Loader from langchain_community.embeddings import AzureOpenAIEmbeddings, OpenAIEmbeddings from langchain_community.vectorstores import Qdrant from loguru import logger from omegaconf import DictConfig from ultra_simple_config import load_config from agent.utils.utility import generate_prompt from agent.utils.vdb import init_vdb load_dotenv() @load_config(location="config/db.yml") def get_db_connection(open_ai_token: str, cfg: DictConfig, collection_name: str) -> Qdrant: """Initializes a connection to the Qdrant DB. Args: open_ai_token (str): The openai token. cfg (DictConfig): the config file. collection_name (str): The name of the vector database collection. Returns: Qdrant: An Langchain Instance of the Qdrant DB. """ if cfg.openai.azure: embedding = AzureOpenAIEmbeddings(deployment=cfg.openai.deployment, openai_api_version="2023-05-15", openai_api_key=open_ai_token) # type: ignore else: embedding = OpenAIEmbeddings(model=cfg.openai.deployment, openai_api_key=open_ai_token) if collection_name is None or not collection_name: collection_name = cfg.qdrant.collection_name_openai return init_vdb(cfg, collection_name, embedding) def embedd_documents_openai(dir: str, open_ai_token: str, collection_name: Optional[str] = None) -> None: """embedd_documents embedds the documents in the given directory. 
:param cfg: Configuration from the file :type cfg: DictConfig :param dir: PDF Directory :type dir: str :param open_ai_token: OpenAI API Token :type open_ai_token: str """ vector_db: Qdrant = get_db_connection(open_ai_token=open_ai_token, collection_name=collection_name) splitter = NLTKTextSplitter(chunk_size=500, chunk_overlap=100) loader = DirectoryLoader(dir, glob="*.pdf", loader_cls=PyPDFium2Loader) docs = loader.load_and_split(splitter) logger.info(f"Loaded {len(docs)} documents.") texts = [doc.page_content for doc in docs] metadatas = [doc.metadata for doc in docs] vector_db.add_texts(texts=texts, metadatas=metadatas) logger.info("SUCCESS: Texts embedded.") def search_documents_openai(open_ai_token: str, query: str, amount: int, threshold: float = 0.0, collection_name: Optional[str] = None) -> List[Tuple[Document, float]]: """Searches the documents in the Qdrant DB with a specific query. Args: open_ai_token (str): The OpenAI API token. query (str): The question for which documents should be searched. Returns: List[Tuple[Document, float]]: A list of search results, where each result is a tuple containing a Document object and a float score. """ vector_db = get_db_connection(open_ai_token=open_ai_token, collection_name=collection_name) docs = vector_db.similarity_search_with_score(query, k=amount, score_threshold=threshold) logger.info("SUCCESS: Documents found.") return docs @load_config(location="config/ai/openai.yml") def summarize_text_openai(text: str, token: str, cfg: DictConfig) -> str: """Summarizes the given text using the Luminous API. Args: text (str): The text to be summarized. token (str): The token for the Luminous API. Returns: str: The summary of the text. 
""" prompt = generate_prompt(prompt_name="openai-summarization.j2", text=text, language="de") openai.api_key = token response = openai.Completion.create( engine=cfg.openai.model, prompt=prompt, temperature=cfg.openai.temperature, max_tokens=cfg.openai.max_tokens, top_p=cfg.openai.top_p, frequency_penalty=cfg.openai.frequency_penalty, presence_penalty=cfg.openai.presence_penalty, best_of=cfg.openai.best_of, stop=cfg.openai.stop, ) return response.choices[0].text @load_config(location="config/ai/openai.yml") def send_completion(text: str, query: str, token: str, cfg: DictConfig) -> str: """Sent completion request to OpenAI API. Args: text (str): The text on which the completion should be based. query (str): The query for the completion. token (str): The token for the OpenAI API. cfg (DictConfig): Returns: str: Response from the OpenAI API. """ prompt = generate_prompt(prompt_name="openai-summarization.j2", text=text, query=query, language="de") openai.api_key = token response = openai.Completion.create( engine=cfg.openai.model, prompt=prompt, temperature=cfg.openai.temperature, max_tokens=cfg.openai.max_tokens, top_p=cfg.openai.top_p, frequency_penalty=cfg.openai.frequency_penalty, presence_penalty=cfg.openai.presence_penalty, best_of=cfg.openai.best_of, stop=cfg.openai.stop, ) return response.choices[0].text def send_custom_completion_openai( token: str, prompt: str, model: str = "gpt3.5", max_tokens: int = 256, stop_sequences: List[str] = ["###"], temperature: float = 0, ) -> str: """Sent completion request to OpenAI API. Args: text (str): The text on which the completion should be based. query (str): The query for the completion. token (str): The token for the OpenAI API. cfg (DictConfig): Returns: str: Response from the OpenAI API. 
""" openai.api_key = token response = openai.Completion.create( engine=model, prompt=prompt, temperature=temperature, max_tokens=max_tokens, stop_sequences=stop_sequences, ) return response.choices[0].text def qa_openai(token: str, documents: list[tuple[Document, float]], query: str, summarization: bool = False) -> tuple[Any, str, dict[Any, Any]]: """QA Function for OpenAI LLMs. Args: token (str): The token for the OpenAI API. documents (list[tuple[Document, float]]): The documents to be searched. query (str): The question for which the LLM should generate an answer. summarization (bool, optional): If the Documents should be summarized. Defaults to False. Returns: tuple: answer, prompt, meta_data """ # if the list of documents contains only one document extract the text directly if len(documents) == 1: text = documents[0][0].page_content meta_data = documents[0][0].metadata else: # extract the text from the documents texts = [doc[0].page_content for doc in documents] if summarization: # call summarization text = "" for t in texts: text += summarize_text_openai(text=t, token=token) else: # combine the texts to one text text = " ".join(texts) meta_data = [doc[0].metadata for doc in documents] # load the prompt prompt = generate_prompt("aleph_alpha_qa.j2", text=text, query=query) try: # call the luminous api answer = send_completion(prompt, token) except ValueError as e: # if the code is PROMPT_TOO_LONG, split it into chunks if e.args[0] == "PROMPT_TOO_LONG": logger.info("Prompt too long. 
Summarizing.") # summarize the text short_text = summarize_text_openai(text, token) # generate the prompt prompt = generate_prompt("openai-qa.j2", text=short_text, query=query) # call the luminous api answer = send_completion(prompt, token) # extract the answer return answer, prompt, meta_data if __name__ == "__main__": token = os.getenv("OPENAI_API_KEY") if not token: raise ValueError("OPENAI_API_KEY is not set.") embedd_documents_openai(dir="data", open_ai_token=token) DOCS = search_documents_openai(open_ai_token="", query="Was ist Vanille?", amount=3) print(f"DOCUMENTS: {DOCS}") summary = summarize_text_openai(text="Below is an extract from the annual financial report of a company. ", token=token) print(f"SUMMARY: {summary}")
[ "langchain_community.embeddings.OpenAIEmbeddings", "langchain_community.embeddings.AzureOpenAIEmbeddings", "langchain.text_splitter.NLTKTextSplitter", "langchain_community.document_loaders.DirectoryLoader" ]
[((692, 705), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (703, 705), False, 'from dotenv import load_dotenv\n'), ((709, 746), 'ultra_simple_config.load_config', 'load_config', ([], {'location': '"""config/db.yml"""'}), "(location='config/db.yml')\n", (720, 746), False, 'from ultra_simple_config import load_config\n'), ((3319, 3363), 'ultra_simple_config.load_config', 'load_config', ([], {'location': '"""config/ai/openai.yml"""'}), "(location='config/ai/openai.yml')\n", (3330, 3363), False, 'from ultra_simple_config import load_config\n'), ((4225, 4269), 'ultra_simple_config.load_config', 'load_config', ([], {'location': '"""config/ai/openai.yml"""'}), "(location='config/ai/openai.yml')\n", (4236, 4269), False, 'from ultra_simple_config import load_config\n'), ((1557, 1598), 'agent.utils.vdb.init_vdb', 'init_vdb', (['cfg', 'collection_name', 'embedding'], {}), '(cfg, collection_name, embedding)\n', (1565, 1598), False, 'from agent.utils.vdb import init_vdb\n'), ((2097, 2148), 'langchain.text_splitter.NLTKTextSplitter', 'NLTKTextSplitter', ([], {'chunk_size': '(500)', 'chunk_overlap': '(100)'}), '(chunk_size=500, chunk_overlap=100)\n', (2113, 2148), False, 'from langchain.text_splitter import NLTKTextSplitter\n'), ((2163, 2225), 'langchain_community.document_loaders.DirectoryLoader', 'DirectoryLoader', (['dir'], {'glob': '"""*.pdf"""', 'loader_cls': 'PyPDFium2Loader'}), "(dir, glob='*.pdf', loader_cls=PyPDFium2Loader)\n", (2178, 2225), False, 'from langchain_community.document_loaders import DirectoryLoader, PyPDFium2Loader\n'), ((2476, 2515), 'loguru.logger.info', 'logger.info', (['"""SUCCESS: Texts embedded."""'], {}), "('SUCCESS: Texts embedded.')\n", (2487, 2515), False, 'from loguru import logger\n'), ((3259, 3299), 'loguru.logger.info', 'logger.info', (['"""SUCCESS: Documents found."""'], {}), "('SUCCESS: Documents found.')\n", (3270, 3299), False, 'from loguru import logger\n'), ((3679, 3764), 'agent.utils.utility.generate_prompt', 
'generate_prompt', ([], {'prompt_name': '"""openai-summarization.j2"""', 'text': 'text', 'language': '"""de"""'}), "(prompt_name='openai-summarization.j2', text=text, language='de'\n )\n", (3694, 3764), False, 'from agent.utils.utility import generate_prompt\n'), ((3803, 4122), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': 'cfg.openai.model', 'prompt': 'prompt', 'temperature': 'cfg.openai.temperature', 'max_tokens': 'cfg.openai.max_tokens', 'top_p': 'cfg.openai.top_p', 'frequency_penalty': 'cfg.openai.frequency_penalty', 'presence_penalty': 'cfg.openai.presence_penalty', 'best_of': 'cfg.openai.best_of', 'stop': 'cfg.openai.stop'}), '(engine=cfg.openai.model, prompt=prompt,\n temperature=cfg.openai.temperature, max_tokens=cfg.openai.max_tokens,\n top_p=cfg.openai.top_p, frequency_penalty=cfg.openai.frequency_penalty,\n presence_penalty=cfg.openai.presence_penalty, best_of=cfg.openai.\n best_of, stop=cfg.openai.stop)\n', (3827, 4122), False, 'import openai\n'), ((4683, 4781), 'agent.utils.utility.generate_prompt', 'generate_prompt', ([], {'prompt_name': '"""openai-summarization.j2"""', 'text': 'text', 'query': 'query', 'language': '"""de"""'}), "(prompt_name='openai-summarization.j2', text=text, query=\n query, language='de')\n", (4698, 4781), False, 'from agent.utils.utility import generate_prompt\n'), ((4820, 5139), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': 'cfg.openai.model', 'prompt': 'prompt', 'temperature': 'cfg.openai.temperature', 'max_tokens': 'cfg.openai.max_tokens', 'top_p': 'cfg.openai.top_p', 'frequency_penalty': 'cfg.openai.frequency_penalty', 'presence_penalty': 'cfg.openai.presence_penalty', 'best_of': 'cfg.openai.best_of', 'stop': 'cfg.openai.stop'}), '(engine=cfg.openai.model, prompt=prompt,\n temperature=cfg.openai.temperature, max_tokens=cfg.openai.max_tokens,\n top_p=cfg.openai.top_p, frequency_penalty=cfg.openai.frequency_penalty,\n presence_penalty=cfg.openai.presence_penalty, 
best_of=cfg.openai.\n best_of, stop=cfg.openai.stop)\n', (4844, 5139), False, 'import openai\n'), ((5804, 5941), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': 'model', 'prompt': 'prompt', 'temperature': 'temperature', 'max_tokens': 'max_tokens', 'stop_sequences': 'stop_sequences'}), '(engine=model, prompt=prompt, temperature=\n temperature, max_tokens=max_tokens, stop_sequences=stop_sequences)\n', (5828, 5941), False, 'import openai\n'), ((7267, 7327), 'agent.utils.utility.generate_prompt', 'generate_prompt', (['"""aleph_alpha_qa.j2"""'], {'text': 'text', 'query': 'query'}), "('aleph_alpha_qa.j2', text=text, query=query)\n", (7282, 7327), False, 'from agent.utils.utility import generate_prompt\n'), ((8017, 8044), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (8026, 8044), False, 'import os\n'), ((1188, 1311), 'langchain_community.embeddings.AzureOpenAIEmbeddings', 'AzureOpenAIEmbeddings', ([], {'deployment': 'cfg.openai.deployment', 'openai_api_version': '"""2023-05-15"""', 'openai_api_key': 'open_ai_token'}), "(deployment=cfg.openai.deployment, openai_api_version=\n '2023-05-15', openai_api_key=open_ai_token)\n", (1209, 1311), False, 'from langchain_community.embeddings import AzureOpenAIEmbeddings, OpenAIEmbeddings\n'), ((1353, 1428), 'langchain_community.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'model': 'cfg.openai.deployment', 'openai_api_key': 'open_ai_token'}), '(model=cfg.openai.deployment, openai_api_key=open_ai_token)\n', (1369, 1428), False, 'from langchain_community.embeddings import AzureOpenAIEmbeddings, OpenAIEmbeddings\n'), ((7566, 7610), 'loguru.logger.info', 'logger.info', (['"""Prompt too long. Summarizing."""'], {}), "('Prompt too long. 
Summarizing.')\n", (7577, 7610), False, 'from loguru import logger\n'), ((7761, 7822), 'agent.utils.utility.generate_prompt', 'generate_prompt', (['"""openai-qa.j2"""'], {'text': 'short_text', 'query': 'query'}), "('openai-qa.j2', text=short_text, query=query)\n", (7776, 7822), False, 'from agent.utils.utility import generate_prompt\n')]
import sys from langchain.chains.summarize import load_summarize_chain from langchain import OpenAI from langchain.text_splitter import RecursiveCharacterTextSplitter text_splitter = RecursiveCharacterTextSplitter() # get transcript file key from args file_key = sys.argv[1] # get transcript text text = open(file_key, "r").read() llm = OpenAI(temperature=0) texts = text_splitter.split_text(text) from langchain.docstore.document import Document docs = [Document(page_content=t) for t in texts] chain = load_summarize_chain(llm, chain_type="map_reduce") output = chain.run(docs) print(output)
[ "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.docstore.document.Document", "langchain.chains.summarize.load_summarize_chain", "langchain.OpenAI" ]
[((186, 218), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {}), '()\n', (216, 218), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((344, 365), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (350, 365), False, 'from langchain import OpenAI\n'), ((514, 564), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', (['llm'], {'chain_type': '"""map_reduce"""'}), "(llm, chain_type='map_reduce')\n", (534, 564), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((464, 488), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 't'}), '(page_content=t)\n', (472, 488), False, 'from langchain.docstore.document import Document\n')]
from base64 import b64decode import os import textwrap from math import ceil from dotenv import load_dotenv load_dotenv() # take environment variables from .env. from fastapi import FastAPI from pydantic import BaseModel from fastapi.middleware.cors import CORSMiddleware from langchain.prompts import PromptTemplate from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain_community.llms import HuggingFaceHub from langchain_openai.llms import OpenAI from langchain.chains.summarize import load_summarize_chain from langchain.docstore.document import Document # from prompts import full_summary_template, snip_summary_template_with_context, snip_summary_template class SummarizeSnip(BaseModel): title: str summary: str = None transcript: str encoded: bool = True dev = os.getenv("FASTAPI_ENV") == "development" # headers = {"Authorization": "Bearer " + FLOWISE_API_KEY} app = FastAPI(docs_url="/api/llm/docs", redoc_url="/api/llm/redoc", openapi_url="/api/llm/openapi.json") # CORS configuration origins = [ "https://www.youtube.com", "http://localhost:3000", "https://www.sniptube.tech", ] app.add_middleware( CORSMiddleware, allow_origins=origins, allow_credentials=True, allow_methods=["GET", "POST", "OPTIONS"], allow_headers=["content-type"] ) @app.get("/api/llm/healthchecker") def healthchecker(): return {"status": "success", "message": "Integrated FastAPI Framework with Next.js and chrome extension successfully!"} @app.post("/api/llm/summarize/snip") async def summarizeSnip(item: SummarizeSnip): # set up model # llm = GPT4All(model=model_path, temp=0.1) # llm = Cohere(model="summarize-xlarge", cohere_api_key=COHERE_API_KEY, temperature=0.1) # OpenAI(temperature=0.6) if not dev else llm = OpenAI(temperature=0.6) if not dev else HuggingFaceHub(repo_id="tiiuae/falcon-7b-instruct", model_kwargs={"temperature": 0.6, 'max_new_tokens': 1000 }) if item.encoded: # decode from base64 title = b64decode(item.title).decode("utf-8") text = 
b64decode(item.transcript).decode("utf-8") summary = b64decode(item.summary).decode("utf-8") if item.summary else None else: title = item.title text = item.transcript summary = item.summary if item.summary else None PROMPT_SNIP_SUMMARY = PromptTemplate(template=snip_summary_template.format(title=title, text='{text}'), input_variables=["text"]) # TODO: refine chain? https://python.langchain.com/docs/modules/chains/popular/summarize#the-refine-chain chain = load_summarize_chain(llm, chain_type="stuff", verbose=True, prompt=PROMPT_SNIP_SUMMARY) # TODO: are metadata necessary? text_document = [Document(page_content=text, metadata={"title": title, "summary": summary, "transcript": text})] summary = chain.invoke({'input_documents': text_document}, return_only_outputs=True)['output_text'].strip() wrapped_summary = textwrap.fill(summary, width=100) return {"summary": wrapped_summary} # ---------------------------PROMPTS---------------------------------------------------------------------------------------------------- snip_summary_template = """You are a youtube section summarizer. Which means you will be given the transcript of a section of a youtube video and you need to summarize that transcript of the youtube video into a concise sentence. The sentence should only describe the main points of the given transcript TRANSCRIPT OF SECTION OF VIDEO TO CONCISELY SUMMARIZE: {text} CONCISE SUMMARIZED SENTENCE FROM TRANSCRIPT(only write one sentence): """
[ "langchain_community.llms.HuggingFaceHub", "langchain.chains.summarize.load_summarize_chain", "langchain.docstore.document.Document", "langchain_openai.llms.OpenAI" ]
[((109, 122), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (120, 122), False, 'from dotenv import load_dotenv\n'), ((930, 1033), 'fastapi.FastAPI', 'FastAPI', ([], {'docs_url': '"""/api/llm/docs"""', 'redoc_url': '"""/api/llm/redoc"""', 'openapi_url': '"""/api/llm/openapi.json"""'}), "(docs_url='/api/llm/docs', redoc_url='/api/llm/redoc', openapi_url=\n '/api/llm/openapi.json')\n", (937, 1033), False, 'from fastapi import FastAPI\n'), ((822, 846), 'os.getenv', 'os.getenv', (['"""FASTAPI_ENV"""'], {}), "('FASTAPI_ENV')\n", (831, 846), False, 'import os\n'), ((2603, 2695), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', (['llm'], {'chain_type': '"""stuff"""', 'verbose': '(True)', 'prompt': 'PROMPT_SNIP_SUMMARY'}), "(llm, chain_type='stuff', verbose=True, prompt=\n PROMPT_SNIP_SUMMARY)\n", (2623, 2695), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((2987, 3020), 'textwrap.fill', 'textwrap.fill', (['summary'], {'width': '(100)'}), '(summary, width=100)\n', (3000, 3020), False, 'import textwrap\n'), ((1819, 1842), 'langchain_openai.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.6)'}), '(temperature=0.6)\n', (1825, 1842), False, 'from langchain_openai.llms import OpenAI\n'), ((1859, 1974), 'langchain_community.llms.HuggingFaceHub', 'HuggingFaceHub', ([], {'repo_id': '"""tiiuae/falcon-7b-instruct"""', 'model_kwargs': "{'temperature': 0.6, 'max_new_tokens': 1000}"}), "(repo_id='tiiuae/falcon-7b-instruct', model_kwargs={\n 'temperature': 0.6, 'max_new_tokens': 1000})\n", (1873, 1974), False, 'from langchain_community.llms import HuggingFaceHub\n'), ((2748, 2846), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text', 'metadata': "{'title': title, 'summary': summary, 'transcript': text}"}), "(page_content=text, metadata={'title': title, 'summary': summary,\n 'transcript': text})\n", (2756, 2846), False, 'from langchain.docstore.document import Document\n'), ((2042, 2063), 
'base64.b64decode', 'b64decode', (['item.title'], {}), '(item.title)\n', (2051, 2063), False, 'from base64 import b64decode\n'), ((2095, 2121), 'base64.b64decode', 'b64decode', (['item.transcript'], {}), '(item.transcript)\n', (2104, 2121), False, 'from base64 import b64decode\n'), ((2156, 2179), 'base64.b64decode', 'b64decode', (['item.summary'], {}), '(item.summary)\n', (2165, 2179), False, 'from base64 import b64decode\n')]
from typing import Any, Dict, List, Union from langchain.memory.chat_memory import BaseChatMemory from langchain.schema.messages import BaseMessage, get_buffer_string class ConversationBufferWindowMemory(BaseChatMemory): """Buffer for storing conversation memory inside a limited size window.""" human_prefix: str = "Human" ai_prefix: str = "AI" memory_key: str = "history" #: :meta private: k: int = 5 """Number of messages to store in buffer.""" @property def buffer(self) -> Union[str, List[BaseMessage]]: """String buffer of memory.""" return self.buffer_as_messages if self.return_messages else self.buffer_as_str @property def buffer_as_str(self) -> str: """Exposes the buffer as a string in case return_messages is True.""" messages = self.chat_memory.messages[-self.k * 2 :] if self.k > 0 else [] return get_buffer_string( messages, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) @property def buffer_as_messages(self) -> List[BaseMessage]: """Exposes the buffer as a list of messages in case return_messages is False.""" return self.chat_memory.messages[-self.k * 2 :] if self.k > 0 else [] @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """Return history buffer.""" return {self.memory_key: self.buffer}
[ "langchain.schema.messages.get_buffer_string" ]
[((899, 989), 'langchain.schema.messages.get_buffer_string', 'get_buffer_string', (['messages'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(messages, human_prefix=self.human_prefix, ai_prefix=self.\n ai_prefix)\n', (916, 989), False, 'from langchain.schema.messages import BaseMessage, get_buffer_string\n')]
from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser from langchain.prompts import StringPromptTemplate from langchain import OpenAI, SerpAPIWrapper, LLMChain from typing import List, Union from langchain.schema import AgentAction, AgentFinish import re from langchain.utilities import BashProcess from langchain.tools.human.tool import HumanInputRun from langchain_tools.cwtool import CloudWatchInsightQuery import sys # Set up a prompt template class CustomPromptTemplate(StringPromptTemplate): # The template to use template: str # The list of tools available tools: List[Tool] def format(self, **kwargs) -> str: # Get the intermediate steps (AgentAction, Observation tuples) # Format them in a particular way intermediate_steps = kwargs.pop("intermediate_steps") thoughts = "" for action, observation in intermediate_steps: thoughts += action.log thoughts += f"\nObservation: {observation}\nThought: " # Set the agent_scratchpad variable to that value kwargs["agent_scratchpad"] = thoughts # Create a tools variable from the list of tools provided kwargs["tools"] = "\n".join([f"{tool.name}: {tool.description}" for tool in self.tools]) # Create a list of tool names for the tools provided kwargs["tool_names"] = ", ".join([tool.name for tool in self.tools]) return self.template.format(**kwargs) class CustomOutputParser(AgentOutputParser): def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]: # Check if agent should finish if "Final Answer:" in llm_output: return AgentFinish( # Return values is generally always a dictionary with a single `output` key # It is not recommended to try anything else at the moment :) return_values={"output": llm_output.split("Final Answer:")[-1].strip()}, log=llm_output, ) # Parse out the action and action input regex = r"Action: (.*?)[\n]*Action Input:[\s]*(.*)" match = re.search(regex, llm_output, re.DOTALL) if not match: raise ValueError(f"Could not parse LLM output: `{llm_output}`") action = 
match.group(1).strip() action_input = match.group(2) # Return the action and action input return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output) def agent_run(user_input) -> None: # Define which tools the agent can use to answer user queries search = SerpAPIWrapper() bash = BashProcess() human = HumanInputRun() cloudwatch = CloudWatchInsightQuery() tools = [ Tool( name = "Search", func=search.run, description="useful for when you need to answer questions about current events" ), Tool( name = "human", func=human.run, description="useful for when you need to ask the human for input" ), Tool( name = "cloudwatch", func=cloudwatch.run, description="useful for when you need run an AWS cloudwatch insight query or search logs in AWS" ), Tool( name = "terminal", func=bash.run, description="useful for when you need to run commands in a terminal" ) ] # Set up the base template template = """You are a cyber Security professional. You will take steps to achieve the requested task. You have access to the following tools: {tools} Use the following format: Question: the input question you must answer Thought: you should always think about what to do Action: the action to take, should be one of [{tool_names}] Action Input: the input to the action Observation: the result of the action ... 
(this Thought/Action/Action Input/Observation can repeat N times) Thought: I now know the final answer Final Answer: the final answer to the original input question Question: {input} {agent_scratchpad}""" prompt = CustomPromptTemplate(template=template,tools=tools,input_variables=["input", "intermediate_steps"]) output_parser = CustomOutputParser() llm = OpenAI(temperature=0) llm_chain = LLMChain(llm=llm, prompt=prompt) tool_names = [tool.name for tool in tools] agent = LLMSingleActionAgent( llm_chain=llm_chain, output_parser=output_parser, stop=["\nObservation:"], allowed_tools=tool_names ) agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True) agent_executor.run(user_input)
[ "langchain.tools.human.tool.HumanInputRun", "langchain_tools.cwtool.CloudWatchInsightQuery", "langchain.agents.AgentExecutor.from_agent_and_tools", "langchain.agents.LLMSingleActionAgent", "langchain.utilities.BashProcess", "langchain.SerpAPIWrapper", "langchain.LLMChain", "langchain.OpenAI", "langchain.agents.Tool" ]
[((2630, 2646), 'langchain.SerpAPIWrapper', 'SerpAPIWrapper', ([], {}), '()\n', (2644, 2646), False, 'from langchain import OpenAI, SerpAPIWrapper, LLMChain\n'), ((2658, 2671), 'langchain.utilities.BashProcess', 'BashProcess', ([], {}), '()\n', (2669, 2671), False, 'from langchain.utilities import BashProcess\n'), ((2684, 2699), 'langchain.tools.human.tool.HumanInputRun', 'HumanInputRun', ([], {}), '()\n', (2697, 2699), False, 'from langchain.tools.human.tool import HumanInputRun\n'), ((2717, 2741), 'langchain_tools.cwtool.CloudWatchInsightQuery', 'CloudWatchInsightQuery', ([], {}), '()\n', (2739, 2741), False, 'from langchain_tools.cwtool import CloudWatchInsightQuery\n'), ((4330, 4351), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (4336, 4351), False, 'from langchain import OpenAI, SerpAPIWrapper, LLMChain\n'), ((4368, 4400), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (4376, 4400), False, 'from langchain import OpenAI, SerpAPIWrapper, LLMChain\n'), ((4461, 4587), 'langchain.agents.LLMSingleActionAgent', 'LLMSingleActionAgent', ([], {'llm_chain': 'llm_chain', 'output_parser': 'output_parser', 'stop': "['\\nObservation:']", 'allowed_tools': 'tool_names'}), "(llm_chain=llm_chain, output_parser=output_parser, stop\n =['\\nObservation:'], allowed_tools=tool_names)\n", (4481, 4587), False, 'from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n'), ((4645, 4719), 'langchain.agents.AgentExecutor.from_agent_and_tools', 'AgentExecutor.from_agent_and_tools', ([], {'agent': 'agent', 'tools': 'tools', 'verbose': '(True)'}), '(agent=agent, tools=tools, verbose=True)\n', (4679, 4719), False, 'from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n'), ((2150, 2189), 're.search', 're.search', (['regex', 'llm_output', 're.DOTALL'], {}), '(regex, llm_output, re.DOTALL)\n', (2159, 2189), False, 'import re\n'), 
((2765, 2887), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Search"""', 'func': 'search.run', 'description': '"""useful for when you need to answer questions about current events"""'}), "(name='Search', func=search.run, description=\n 'useful for when you need to answer questions about current events')\n", (2769, 2887), False, 'from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n'), ((2940, 3046), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""human"""', 'func': 'human.run', 'description': '"""useful for when you need to ask the human for input"""'}), "(name='human', func=human.run, description=\n 'useful for when you need to ask the human for input')\n", (2944, 3046), False, 'from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n'), ((3099, 3251), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""cloudwatch"""', 'func': 'cloudwatch.run', 'description': '"""useful for when you need run an AWS cloudwatch insight query or search logs in AWS"""'}), "(name='cloudwatch', func=cloudwatch.run, description=\n 'useful for when you need run an AWS cloudwatch insight query or search logs in AWS'\n )\n", (3103, 3251), False, 'from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n'), ((3299, 3410), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""terminal"""', 'func': 'bash.run', 'description': '"""useful for when you need to run commands in a terminal"""'}), "(name='terminal', func=bash.run, description=\n 'useful for when you need to run commands in a terminal')\n", (3303, 3410), False, 'from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n')]
# coding: utf-8 import os import gradio as gr import re import uuid from PIL import Image, ImageDraw, ImageOps, ImageFont import numpy as np import argparse import inspect from langchain.agents.initialize import initialize_agent from langchain.agents.tools import Tool from langchain.chains.conversation.memory import ConversationBufferMemory from gpt4tools.llm import LlamaLangChain from gpt4tools.tools import * GPT4TOOLS_PREFIX = """GPT4Tools can handle various text and visual tasks, such as answering questions and providing in-depth explanations and discussions. It generates human-like text and uses tools to indirectly understand images. When referring to images, GPT4Tools follows strict file name rules. To complete visual tasks, GPT4Tools uses tools and stays loyal to observation outputs. Users can provide new images to GPT4Tools with a description, but tools must be used for subsequent tasks. TOOLS: ------ GPT4Tools has access to the following tools:""" GPT4TOOLS_FORMAT_INSTRUCTIONS = """To use a tool, please use the following format: ``` Thought: Do I need to use a tool? Yes Action: the action to take, should be one of [{tool_names}] Action Input: the input to the action Observation: the result of the action ``` When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format: ``` Thought: Do I need to use a tool? No {ai_prefix}: [your response here] ``` """ GPT4TOOLS_SUFFIX = """Follow file name rules and do not fake non-existent file names. Remember to provide the image file name loyally from the last tool observation. Previous conversation: {chat_history} New input: {input} GPT4Tools needs to use tools to observe images, not directly imagine them. Thoughts and observations in the conversation are only visible to GPT4Tools. When answering human questions, repeat important information. Let's think step by step. 
{agent_scratchpad}""" os.makedirs('image', exist_ok=True) def cut_dialogue_history(history_memory, keep_last_n_paragraphs=1): if history_memory is None or len(history_memory) == 0: return history_memory paragraphs = history_memory.split('Human:') if len(paragraphs) <= keep_last_n_paragraphs: return history_memory return 'Human:' + 'Human:'.join(paragraphs[-1:]) class ConversationBot: def __init__(self, load_dict, llm_kwargs): # load_dict = {'VisualQuestionAnswering':'cuda:0', 'ImageCaptioning':'cuda:1',...} print(f"Initializing GPT4Tools, load_dict={load_dict}") if 'ImageCaptioning' not in load_dict: raise ValueError("You have to load ImageCaptioning as a basic function for GPT4Tools") self.models = {} # Load Basic Foundation Models for class_name, device in load_dict.items(): self.models[class_name] = globals()[class_name](device=device) # Load Template Foundation Models for class_name, module in globals().items(): if getattr(module, 'template_model', False): template_required_names = {k for k in inspect.signature(module.__init__).parameters.keys() if k!='self'} loaded_names = set([type(e).__name__ for e in self.models.values()]) if template_required_names.issubset(loaded_names): self.models[class_name] = globals()[class_name]( **{name: self.models[name] for name in template_required_names}) print(f"All the Available Functions: {self.models}") self.tools = [] for instance in self.models.values(): for e in dir(instance): if e.startswith('inference'): func = getattr(instance, e) self.tools.append(Tool(name=func.name, description=func.description, func=func)) self.llm = LlamaLangChain(model_kwargs=llm_kwargs) self.memory = ConversationBufferMemory(memory_key="chat_history", output_key='output') def init_agent(self, lang): self.memory.clear() #clear previous history if lang=='English': PREFIX, FORMAT_INSTRUCTIONS, SUFFIX = GPT4TOOLS_PREFIX, GPT4TOOLS_FORMAT_INSTRUCTIONS, GPT4TOOLS_SUFFIX place = "Enter text and press enter, or upload an image" label_clear = "Clear" 
else: raise NotImplementedError(f'{lang} is not supported yet') self.agent = initialize_agent( self.tools, self.llm, agent="conversational-react-description", verbose=True, memory=self.memory, return_intermediate_steps=True, agent_kwargs={'prefix': PREFIX, 'format_instructions': FORMAT_INSTRUCTIONS, 'suffix': SUFFIX}, ) return gr.update(visible = True), gr.update(visible = False), gr.update(placeholder=place), gr.update(value=label_clear) def run_text(self, text, state, temperature, top_p, max_new_tokens, keep_last_n_paragraphs): self.llm.set_llm_params(temperature=temperature, top_p=top_p, max_new_tokens=max_new_tokens) self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer, keep_last_n_paragraphs) res = self.agent({"input": text.strip()}) res['output'] = res['output'].replace("\\", "/") response = re.sub('(image/[-\w]*.png)', lambda m: f'![](file={m.group(0)})*{m.group(0)}*', res['output']) state = state + [(text, response)] print(f"\nProcessed run_text, Input text: {text}\nCurrent state: {state}\n" f"Current Memory: {self.agent.memory.buffer}") image_filenames = re.findall('image/.*.png', str(self.agent.memory.buffer)) image_filename = image_filenames[-1] if len(image_filenames) > 0 else '' return state, state, f'{image_filename} ' def run_image(self, image, state, txt, lang='English'): if image is None: return state, state, txt image_filename = os.path.join('image', f"{str(uuid.uuid4())[:8]}.png") print("======>Auto Resize Image...") img = image width, height = img.size ratio = min(512 / width, 512 / height) width_new, height_new = (round(width * ratio), round(height * ratio)) width_new = int(np.round(width_new / 64.0)) * 64 height_new = int(np.round(height_new / 64.0)) * 64 img = img.resize((width_new, height_new)) img = img.convert('RGB') img.save(image_filename, "PNG") print(f"Resize image form {width}x{height} to {width_new}x{height_new}") description = self.models['ImageCaptioning'].inference(image_filename) if lang == 'English': 
Human_prompt = f'\nHuman: Provide an image named {image_filename}. The description is: {description}. Understand the image using tools.\n' AI_prompt = "Received." else: raise NotImplementedError(f'{lang} is not supported yet') self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt state = state + [(f"![](file={image_filename})*{image_filename}*", AI_prompt)] print(f"\nProcessed run_image, Input image: {image_filename}\nCurrent state: {state}\n" f"Current Memory: {self.agent.memory.buffer}") return state, state, f'{image_filename} {txt}' if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--base_model', type=str, required=True, help='folder path to the vicuna with tokenizer') parser.add_argument('--lora_model', type=str, required=True, help='folder path to the lora model') parser.add_argument('--load', type=str, default='ImageCaptioning_cuda:0,Text2Image_cuda:0') parser.add_argument('--llm_device', type=str, default='cpu', help='device to run the llm model') parser.add_argument('--temperature', type=float, default=0.1, help='temperature for the llm model') parser.add_argument('--max_new_tokens', type=int, default=512, help='max number of new tokens to generate') parser.add_argument('--top_p', type=float, default=0.75, help='top_p for the llm model') parser.add_argument('--top_k', type=int, default=40, help='top_k for the llm model') parser.add_argument('--num_beams', type=int, default=1, help='num_beams for the llm model') parser.add_argument('--keep_last_n_paragraphs', type=int, default=1, help='keep last n paragraphs in the memory') parser.add_argument('--cache-dir', type=str, default=None, help="cache path to save model") parser.add_argument('--server-name', type=str, default='0.0.0.0', help="gradio sever name") parser.add_argument('--server-port', type=int, default=8888, help="gradio server port") parser.add_argument('--share', action="store_true") args = parser.parse_args() load_dict = 
{e.split('_')[0].strip(): e.split('_')[1].strip() for e in args.load.split(',')} llm_kwargs = {'base_model': args.base_model, 'lora_model': args.lora_model, 'device': args.llm_device, 'temperature': args.temperature, 'max_new_tokens': args.max_new_tokens, 'top_p': args.top_p, 'top_k': args.top_k, 'num_beams': args.num_beams, 'cache_dir': args.cache_dir,} bot = ConversationBot(load_dict=load_dict, llm_kwargs=llm_kwargs) examples = [ ['asserts/images/example-1.jpg','Make the image look like a cartoon.'], ['asserts/images/example-2.jpg','Segment the tie in the image.'], ['asserts/images/example-3.jpg','Generate a man watching a sea based on the pose of the woman.'], ['asserts/images/example-4.jpg','Tell me a story about this image.'], ] with gr.Blocks() as demo: with gr.Row(): with gr.Column(scale=0.3): with gr.Row(): image = gr.Image(type="pil", label="input image") with gr.Row(): txt = gr.Textbox(lines=7, show_label=False, elem_id="textbox", placeholder="Enter text and press submit, or upload an image").style(container=False) with gr.Row(): submit = gr.Button("Submit") with gr.Row(): clear = gr.Button("Clear") with gr.Row(): keep_last_n_paragraphs = gr.Slider( minimum=0, maximum=3, value=args.keep_last_n_paragraphs, step=1, interactive=True, label="Remember Last N Paragraphs") max_new_token = gr.Slider( minimum=128, maximum=1024, value=args.max_new_tokens, step=64, interactive=True, label="Max New Tokens") temperature = gr.Slider( minimum=0.0, maximum=1.0, value=args.temperature, step=0.1, interactive=True, label="Temperature") top_p = gr.Slider( minimum=0.0, maximum=1.0, value=args.top_p, step=0.1, interactive=True, label="Top P") with gr.Column(scale=0.7): chatbot = gr.Chatbot(elem_id="chatbot", label="🦙 GPT4Tools").style(height=690) state = gr.State([]) # TODO: support more language bot.init_agent('English') txt.submit(bot.run_text, [txt, state], [chatbot, state]) txt.submit(lambda: "", None, txt) # submit.click(bot.run_image, [image, state, txt], [chatbot, 
state, txt]).then( # bot.run_text, [txt, state, temperature, top_p, max_new_token, keep_last_n_paragraphs], [chatbot, state]).then( # lambda: "", None, txt).then( # lambda: None, None, image) submit.click(bot.run_image, [image, state, txt], [chatbot, state, txt]).then( bot.run_text, [txt, state, temperature, top_p, max_new_token, keep_last_n_paragraphs], [chatbot, state, txt]).then( lambda: None, None, image) clear.click(bot.memory.clear) clear.click(lambda: [], None, chatbot) clear.click(lambda: [], None, state) with gr.Row(): gr.Examples( examples=examples, inputs=[image, txt], ) demo.launch(server_name=args.server_name, server_port=args.server_port, enable_queue=True, share=args.share)
[ "langchain.chains.conversation.memory.ConversationBufferMemory", "langchain.agents.initialize.initialize_agent", "langchain.agents.tools.Tool" ]
[((1924, 1959), 'os.makedirs', 'os.makedirs', (['"""image"""'], {'exist_ok': '(True)'}), "('image', exist_ok=True)\n", (1935, 1959), False, 'import os\n'), ((7453, 7478), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7476, 7478), False, 'import argparse\n'), ((3840, 3879), 'gpt4tools.llm.LlamaLangChain', 'LlamaLangChain', ([], {'model_kwargs': 'llm_kwargs'}), '(model_kwargs=llm_kwargs)\n', (3854, 3879), False, 'from gpt4tools.llm import LlamaLangChain\n'), ((3903, 3975), 'langchain.chains.conversation.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'output_key': '"""output"""'}), "(memory_key='chat_history', output_key='output')\n", (3927, 3975), False, 'from langchain.chains.conversation.memory import ConversationBufferMemory\n'), ((4413, 4667), 'langchain.agents.initialize.initialize_agent', 'initialize_agent', (['self.tools', 'self.llm'], {'agent': '"""conversational-react-description"""', 'verbose': '(True)', 'memory': 'self.memory', 'return_intermediate_steps': '(True)', 'agent_kwargs': "{'prefix': PREFIX, 'format_instructions': FORMAT_INSTRUCTIONS, 'suffix': SUFFIX\n }"}), "(self.tools, self.llm, agent=\n 'conversational-react-description', verbose=True, memory=self.memory,\n return_intermediate_steps=True, agent_kwargs={'prefix': PREFIX,\n 'format_instructions': FORMAT_INSTRUCTIONS, 'suffix': SUFFIX})\n", (4429, 4667), False, 'from langchain.agents.initialize import initialize_agent\n'), ((9840, 9851), 'gradio.Blocks', 'gr.Blocks', ([], {}), '()\n', (9849, 9851), True, 'import gradio as gr\n'), ((4783, 4806), 'gradio.update', 'gr.update', ([], {'visible': '(True)'}), '(visible=True)\n', (4792, 4806), True, 'import gradio as gr\n'), ((4810, 4834), 'gradio.update', 'gr.update', ([], {'visible': '(False)'}), '(visible=False)\n', (4819, 4834), True, 'import gradio as gr\n'), ((4838, 4866), 'gradio.update', 'gr.update', ([], {'placeholder': 'place'}), '(placeholder=place)\n', 
(4847, 4866), True, 'import gradio as gr\n'), ((4868, 4896), 'gradio.update', 'gr.update', ([], {'value': 'label_clear'}), '(value=label_clear)\n', (4877, 4896), True, 'import gradio as gr\n'), ((9874, 9882), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (9880, 9882), True, 'import gradio as gr\n'), ((12740, 12748), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (12746, 12748), True, 'import gradio as gr\n'), ((12762, 12813), 'gradio.Examples', 'gr.Examples', ([], {'examples': 'examples', 'inputs': '[image, txt]'}), '(examples=examples, inputs=[image, txt])\n', (12773, 12813), True, 'import gradio as gr\n'), ((6340, 6366), 'numpy.round', 'np.round', (['(width_new / 64.0)'], {}), '(width_new / 64.0)\n', (6348, 6366), True, 'import numpy as np\n'), ((6398, 6425), 'numpy.round', 'np.round', (['(height_new / 64.0)'], {}), '(height_new / 64.0)\n', (6406, 6425), True, 'import numpy as np\n'), ((9901, 9921), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.3)'}), '(scale=0.3)\n', (9910, 9921), True, 'import gradio as gr\n'), ((11640, 11660), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.7)'}), '(scale=0.7)\n', (11649, 11660), True, 'import gradio as gr\n'), ((11781, 11793), 'gradio.State', 'gr.State', (['[]'], {}), '([])\n', (11789, 11793), True, 'import gradio as gr\n'), ((9944, 9952), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (9950, 9952), True, 'import gradio as gr\n'), ((9982, 10023), 'gradio.Image', 'gr.Image', ([], {'type': '"""pil"""', 'label': '"""input image"""'}), "(type='pil', label='input image')\n", (9990, 10023), True, 'import gradio as gr\n'), ((10045, 10053), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (10051, 10053), True, 'import gradio as gr\n'), ((10282, 10290), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (10288, 10290), True, 'import gradio as gr\n'), ((10321, 10340), 'gradio.Button', 'gr.Button', (['"""Submit"""'], {}), "('Submit')\n", (10330, 10340), True, 'import gradio as gr\n'), ((10362, 10370), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (10368, 10370), 
True, 'import gradio as gr\n'), ((10400, 10418), 'gradio.Button', 'gr.Button', (['"""Clear"""'], {}), "('Clear')\n", (10409, 10418), True, 'import gradio as gr\n'), ((10440, 10448), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (10446, 10448), True, 'import gradio as gr\n'), ((10495, 10627), 'gradio.Slider', 'gr.Slider', ([], {'minimum': '(0)', 'maximum': '(3)', 'value': 'args.keep_last_n_paragraphs', 'step': '(1)', 'interactive': '(True)', 'label': '"""Remember Last N Paragraphs"""'}), "(minimum=0, maximum=3, value=args.keep_last_n_paragraphs, step=1,\n interactive=True, label='Remember Last N Paragraphs')\n", (10504, 10627), True, 'import gradio as gr\n'), ((10805, 10923), 'gradio.Slider', 'gr.Slider', ([], {'minimum': '(128)', 'maximum': '(1024)', 'value': 'args.max_new_tokens', 'step': '(64)', 'interactive': '(True)', 'label': '"""Max New Tokens"""'}), "(minimum=128, maximum=1024, value=args.max_new_tokens, step=64,\n interactive=True, label='Max New Tokens')\n", (10814, 10923), True, 'import gradio as gr\n'), ((11099, 11211), 'gradio.Slider', 'gr.Slider', ([], {'minimum': '(0.0)', 'maximum': '(1.0)', 'value': 'args.temperature', 'step': '(0.1)', 'interactive': '(True)', 'label': '"""Temperature"""'}), "(minimum=0.0, maximum=1.0, value=args.temperature, step=0.1,\n interactive=True, label='Temperature')\n", (11108, 11211), True, 'import gradio as gr\n'), ((11381, 11482), 'gradio.Slider', 'gr.Slider', ([], {'minimum': '(0.0)', 'maximum': '(1.0)', 'value': 'args.top_p', 'step': '(0.1)', 'interactive': '(True)', 'label': '"""Top P"""'}), "(minimum=0.0, maximum=1.0, value=args.top_p, step=0.1, interactive\n =True, label='Top P')\n", (11390, 11482), True, 'import gradio as gr\n'), ((3758, 3819), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': 'func.name', 'description': 'func.description', 'func': 'func'}), '(name=func.name, description=func.description, func=func)\n', (3762, 3819), False, 'from langchain.agents.tools import Tool\n'), ((11688, 11738), 
'gradio.Chatbot', 'gr.Chatbot', ([], {'elem_id': '"""chatbot"""', 'label': '"""🦙 GPT4Tools"""'}), "(elem_id='chatbot', label='🦙 GPT4Tools')\n", (11698, 11738), True, 'import gradio as gr\n'), ((6068, 6080), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6078, 6080), False, 'import uuid\n'), ((10081, 10205), 'gradio.Textbox', 'gr.Textbox', ([], {'lines': '(7)', 'show_label': '(False)', 'elem_id': '"""textbox"""', 'placeholder': '"""Enter text and press submit, or upload an image"""'}), "(lines=7, show_label=False, elem_id='textbox', placeholder=\n 'Enter text and press submit, or upload an image')\n", (10091, 10205), True, 'import gradio as gr\n'), ((3072, 3106), 'inspect.signature', 'inspect.signature', (['module.__init__'], {}), '(module.__init__)\n', (3089, 3106), False, 'import inspect\n')]
import json from pydantic import BaseModel, Field from pydantic import BaseModel, Field from langchain.llms.base import BaseLLM from typing import List, Any from langchain import LLMChain from llm.generate_task_plan.prompt import get_template from llm.list_output_parser import LLMListOutputParser class Task(BaseModel): """Task model.""" id: int = Field(..., description="Task ID") description: str = Field(..., description="Task description") is_done: bool = Field(False, description="Task done or not") result: str = Field("", description="The result of the task") class TaskManeger(BaseModel): """Task manager model.""" tasks: List[Task] = Field([], description="The list of tasks") current_task_id: int = Field(1, description="The last task id") llm: BaseLLM = Field(..., description="llm class for the agent") def generate_task_plan(self, name: str, role: str, goal: str): """Generate a task plan for the agent.""" propmt = get_template() llm_chain = LLMChain(prompt=propmt, llm=self.llm) try: result = llm_chain.predict( name=name, role=role, goal=goal ) except Exception as e: raise Exception(f"Error: {e}") # Parse and validate the result try: result_list = LLMListOutputParser.parse(result, separeted_string="\t") except Exception as e: raise Exception("Error: " + str(e)) # Add tasks with a serial number for i, e in enumerate(result_list, start=1): id = int(i) description = e self.tasks.append(Task(id=id, description=description)) self def get_task_by_id(self, id: int) -> Task: """Get a task by Task id.""" for task in self.tasks: if task.id == id: return task return None def get_current_task(self) -> Task: """Get the current task agent is working on.""" return self.get_task_by_id(self.current_task_id) def get_current_task_string(self) -> str: """Get the current task agent is working on as a string.""" task = self.get_current_task() if task is None: return None else: return self._task_to_string(task) def complete_task(self, id: int, result: str) -> None: """Complete a task by 
Task id.""" # Complete the task specified by ID self.tasks[id - 1].is_done = True self.tasks[id - 1].result = result self.current_task_id += 1 def complete_current_task(self, result: str) -> None: """Complete the current task agent is working on.""" self.complete_task(self.current_task_id, result=result) def _task_to_string(self, task: Task) -> str: """Convert a task to a string.""" return f"{task.id}: {task.description}" def get_incomplete_tasks(self) -> List[Task]: """Get the list of incomplete tasks.""" return [task for task in self.tasks if not task.is_done] def get_incomplete_tasks_string(self) -> str: """Get the list of incomplete tasks as a string.""" result = "" for task in self.get_incomplete_tasks(): result += self._task_to_string(task) + "\n" return result
[ "langchain.LLMChain" ]
[((359, 392), 'pydantic.Field', 'Field', (['...'], {'description': '"""Task ID"""'}), "(..., description='Task ID')\n", (364, 392), False, 'from pydantic import BaseModel, Field\n'), ((416, 458), 'pydantic.Field', 'Field', (['...'], {'description': '"""Task description"""'}), "(..., description='Task description')\n", (421, 458), False, 'from pydantic import BaseModel, Field\n'), ((479, 523), 'pydantic.Field', 'Field', (['(False)'], {'description': '"""Task done or not"""'}), "(False, description='Task done or not')\n", (484, 523), False, 'from pydantic import BaseModel, Field\n'), ((542, 589), 'pydantic.Field', 'Field', (['""""""'], {'description': '"""The result of the task"""'}), "('', description='The result of the task')\n", (547, 589), False, 'from pydantic import BaseModel, Field\n'), ((676, 718), 'pydantic.Field', 'Field', (['[]'], {'description': '"""The list of tasks"""'}), "([], description='The list of tasks')\n", (681, 718), False, 'from pydantic import BaseModel, Field\n'), ((746, 786), 'pydantic.Field', 'Field', (['(1)'], {'description': '"""The last task id"""'}), "(1, description='The last task id')\n", (751, 786), False, 'from pydantic import BaseModel, Field\n'), ((806, 855), 'pydantic.Field', 'Field', (['...'], {'description': '"""llm class for the agent"""'}), "(..., description='llm class for the agent')\n", (811, 855), False, 'from pydantic import BaseModel, Field\n'), ((991, 1005), 'llm.generate_task_plan.prompt.get_template', 'get_template', ([], {}), '()\n', (1003, 1005), False, 'from llm.generate_task_plan.prompt import get_template\n'), ((1026, 1063), 'langchain.LLMChain', 'LLMChain', ([], {'prompt': 'propmt', 'llm': 'self.llm'}), '(prompt=propmt, llm=self.llm)\n', (1034, 1063), False, 'from langchain import LLMChain\n'), ((1366, 1422), 'llm.list_output_parser.LLMListOutputParser.parse', 'LLMListOutputParser.parse', (['result'], {'separeted_string': '"""\t"""'}), "(result, separeted_string='\\t')\n", (1391, 1422), False, 'from 
llm.list_output_parser import LLMListOutputParser\n')]
# Ingest Documents into a Zep Collection import os from dotenv import find_dotenv, load_dotenv from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain_community.document_loaders import WebBaseLoader from zep_python import ZepClient from zep_python.langchain.vectorstore import ZepVectorStore load_dotenv(dotenv_path=find_dotenv()) SOURCE = "https://en.wikipedia.org/wiki/Leonard_Bernstein" # noqa: E501 ZEP_API_URL = os.environ.get( "ZEP_API_URL" ) # only required if you're using Zep Open Source ZEP_API_KEY = os.environ.get("ZEP_API_KEY") # Required for Zep Cloud if ZEP_API_KEY is None: raise ValueError( "ZEP_API_KEY is required for Zep Cloud. " "Remove this check if using Zep Open Source." ) ZEP_COLLECTION_NAME = os.environ.get("ZEP_COLLECTION_NAME") if ZEP_COLLECTION_NAME is None: raise ValueError("ZEP_COLLECTION_NAME is required for ingestion. ") zep = ZepClient( api_key=ZEP_API_KEY, api_url=ZEP_API_URL, # only required if you're using Zep Open Source ) # Load loader = WebBaseLoader(SOURCE) data = loader.load() print(f"Loaded: {len(data)} documents") # Split text_splitter = RecursiveCharacterTextSplitter(chunk_size=400, chunk_overlap=200) all_splits = text_splitter.split_documents(data) print(f"Adding {len(all_splits)} documents to {ZEP_COLLECTION_NAME}...") # Add to vectorDB vectorstore = ZepVectorStore.from_documents( documents=all_splits, collection_name=ZEP_COLLECTION_NAME, zep_client=zep, ) print(f"Added {len(all_splits)} documents to {ZEP_COLLECTION_NAME}...")
[ "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain_community.document_loaders.WebBaseLoader" ]
[((449, 478), 'os.environ.get', 'os.environ.get', (['"""ZEP_API_URL"""'], {}), "('ZEP_API_URL')\n", (463, 478), False, 'import os\n'), ((549, 578), 'os.environ.get', 'os.environ.get', (['"""ZEP_API_KEY"""'], {}), "('ZEP_API_KEY')\n", (563, 578), False, 'import os\n'), ((784, 821), 'os.environ.get', 'os.environ.get', (['"""ZEP_COLLECTION_NAME"""'], {}), "('ZEP_COLLECTION_NAME')\n", (798, 821), False, 'import os\n'), ((933, 984), 'zep_python.ZepClient', 'ZepClient', ([], {'api_key': 'ZEP_API_KEY', 'api_url': 'ZEP_API_URL'}), '(api_key=ZEP_API_KEY, api_url=ZEP_API_URL)\n', (942, 984), False, 'from zep_python import ZepClient\n'), ((1062, 1083), 'langchain_community.document_loaders.WebBaseLoader', 'WebBaseLoader', (['SOURCE'], {}), '(SOURCE)\n', (1075, 1083), False, 'from langchain_community.document_loaders import WebBaseLoader\n'), ((1171, 1236), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(400)', 'chunk_overlap': '(200)'}), '(chunk_size=400, chunk_overlap=200)\n', (1201, 1236), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1393, 1502), 'zep_python.langchain.vectorstore.ZepVectorStore.from_documents', 'ZepVectorStore.from_documents', ([], {'documents': 'all_splits', 'collection_name': 'ZEP_COLLECTION_NAME', 'zep_client': 'zep'}), '(documents=all_splits, collection_name=\n ZEP_COLLECTION_NAME, zep_client=zep)\n', (1422, 1502), False, 'from zep_python.langchain.vectorstore import ZepVectorStore\n'), ((345, 358), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (356, 358), False, 'from dotenv import find_dotenv, load_dotenv\n')]
#model_settings.py import streamlit as st from langchain.embeddings.huggingface import HuggingFaceEmbeddings from llama_index import LangchainEmbedding, LLMPredictor, PromptHelper, OpenAIEmbedding, ServiceContext from llama_index.logger import LlamaLogger from langchain.chat_models import ChatOpenAI from langchain import OpenAI from enum import Enum class sentenceTransformers(Enum): OPTION1 = "sentence-transformers/all-MiniLM-L6-v2" #default OPTION2 = "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2" OPTION3 = "sentence-transformers/all-mpnet-base-v2" def get_sentence_transformer_dropdown(): options = [e.value for e in sentenceTransformers] selected_option = st.selectbox("Sentence transformer:", options) return selected_option def get_embed_model(provider='Langchain', model_name=sentenceTransformers.OPTION1.value): # load in HF embedding model from langchain embed_model = LangchainEmbedding(HuggingFaceEmbeddings(model_name=model_name)) if provider=='Langchain' else OpenAIEmbedding() return embed_model def get_prompt_helper(): # define prompt helper max_input_size = 4096 num_output = 2048 max_chunk_overlap = 20 prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap) return prompt_helper def get_llm_predictor(): # define LLM num_output = 2048 #llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo", max_tokens=num_output)) llm_predictor = LLMPredictor(ChatOpenAI(temperature=0.1, model_name="gpt-3.5-turbo", max_tokens=num_output)) return llm_predictor @st.cache_resource def get_logger(): llama_logger = LlamaLogger() return llama_logger def get_service_context(llm_predictor=get_llm_predictor(), embed_model=get_embed_model(), prompt_helper=get_prompt_helper(), chunk_size_limit=512, llama_logger=get_logger()): return ServiceContext.from_defaults(llm_predictor=llm_predictor, embed_model=embed_model, prompt_helper=prompt_helper, chunk_size_limit=chunk_size_limit, llama_logger=llama_logger)
[ "langchain.embeddings.huggingface.HuggingFaceEmbeddings", "langchain.chat_models.ChatOpenAI" ]
[((705, 751), 'streamlit.selectbox', 'st.selectbox', (['"""Sentence transformer:"""', 'options'], {}), "('Sentence transformer:', options)\n", (717, 751), True, 'import streamlit as st\n'), ((1220, 1279), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (1232, 1279), False, 'from llama_index import LangchainEmbedding, LLMPredictor, PromptHelper, OpenAIEmbedding, ServiceContext\n'), ((1684, 1697), 'llama_index.logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (1695, 1697), False, 'from llama_index.logger import LlamaLogger\n'), ((2009, 2192), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'embed_model': 'embed_model', 'prompt_helper': 'prompt_helper', 'chunk_size_limit': 'chunk_size_limit', 'llama_logger': 'llama_logger'}), '(llm_predictor=llm_predictor, embed_model=\n embed_model, prompt_helper=prompt_helper, chunk_size_limit=\n chunk_size_limit, llama_logger=llama_logger)\n', (2037, 2192), False, 'from llama_index import LangchainEmbedding, LLMPredictor, PromptHelper, OpenAIEmbedding, ServiceContext\n'), ((1031, 1048), 'llama_index.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (1046, 1048), False, 'from llama_index import LangchainEmbedding, LLMPredictor, PromptHelper, OpenAIEmbedding, ServiceContext\n'), ((1522, 1600), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.1)', 'model_name': '"""gpt-3.5-turbo"""', 'max_tokens': 'num_output'}), "(temperature=0.1, model_name='gpt-3.5-turbo', max_tokens=num_output)\n", (1532, 1600), False, 'from langchain.chat_models import ChatOpenAI\n'), ((955, 999), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (976, 999), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n')]
import os from dotenv import load_dotenv import streamlit as st from langchain.llms import OpenAI from langchain.chat_models import ChatOpenAI from langchain.embeddings.openai import OpenAIEmbeddings from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.chains import ConversationalRetrievalChain from langchain.agents.agent_toolkits import create_retriever_tool from langchain.agents.agent_toolkits import create_conversational_retrieval_agent from langchain.callbacks import StreamlitCallbackHandler from langchain.tools import BaseTool, Tool, tool from langchain.callbacks.base import BaseCallbackHandler from langchain.schema import ChatMessage from langchain.memory.chat_message_histories import StreamlitChatMessageHistory from langchain import PromptTemplate, LLMChain from langchain.vectorstores import LanceDB import lancedb import pandas as pd from langchain.chains import RetrievalQA st.set_page_config(page_title="GlobeBotter", page_icon="🎬") st.header('🎬 Welcome to MovieHarbor, your favourite movie recommender') load_dotenv() #os.environ["HUGGINGFACEHUB_API_TOKEN"] openai_api_key = os.environ['OPENAI_API_KEY'] embeddings = OpenAIEmbeddings() uri = "data/sample-lancedb" db = lancedb.connect(uri) table = db.open_table('movies') docsearch = LanceDB(connection = table, embedding = embeddings) # Import the movie dataset md = pd.read_pickle('movies.pkl') # Create a sidebar for user input st.sidebar.title("Movie Recommendation System") st.sidebar.markdown("Please enter your details and preferences below:") # Ask the user for age, gender and favourite movie genre age = st.sidebar.slider("What is your age?", 1, 100, 25) gender = st.sidebar.radio("What is your gender?", ("Male", "Female", "Other")) genre = st.sidebar.selectbox("What is your favourite movie genre?", md.explode('genres')["genres"].unique()) # Filter the movies based on the user input df_filtered = md[md['genres'].apply(lambda x: genre in x)] template_prefix = """You are a movie 
recommender system that help users to find movies that match their preferences. Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. {context}""" user_info = """This is what we know about the user, and you can use this information to better tune your research: Age: {age} Gender: {gender}""" template_suffix= """Question: {question} Your response:""" user_info = user_info.format(age = age, gender = gender) COMBINED_PROMPT = template_prefix +'\n'+ user_info +'\n'+ template_suffix print(COMBINED_PROMPT) #setting up the chain qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type="stuff", retriever=docsearch.as_retriever(search_kwargs={'data': df_filtered}), return_source_documents=True) query = st.text_input('Enter your question:', placeholder = 'What action movies do you suggest?') if query: result = qa({"query": query}) st.write(result['result'])
[ "langchain.embeddings.openai.OpenAIEmbeddings", "langchain.llms.OpenAI", "langchain.vectorstores.LanceDB" ]
[((924, 983), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""GlobeBotter"""', 'page_icon': '"""🎬"""'}), "(page_title='GlobeBotter', page_icon='🎬')\n", (942, 983), True, 'import streamlit as st\n'), ((984, 1055), 'streamlit.header', 'st.header', (['"""🎬 Welcome to MovieHarbor, your favourite movie recommender"""'], {}), "('🎬 Welcome to MovieHarbor, your favourite movie recommender')\n", (993, 1055), True, 'import streamlit as st\n'), ((1057, 1070), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1068, 1070), False, 'from dotenv import load_dotenv\n'), ((1172, 1190), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1188, 1190), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((1224, 1244), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1239, 1244), False, 'import lancedb\n'), ((1290, 1337), 'langchain.vectorstores.LanceDB', 'LanceDB', ([], {'connection': 'table', 'embedding': 'embeddings'}), '(connection=table, embedding=embeddings)\n', (1297, 1337), False, 'from langchain.vectorstores import LanceDB\n'), ((1375, 1403), 'pandas.read_pickle', 'pd.read_pickle', (['"""movies.pkl"""'], {}), "('movies.pkl')\n", (1389, 1403), True, 'import pandas as pd\n'), ((1439, 1486), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Movie Recommendation System"""'], {}), "('Movie Recommendation System')\n", (1455, 1486), True, 'import streamlit as st\n'), ((1487, 1558), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""Please enter your details and preferences below:"""'], {}), "('Please enter your details and preferences below:')\n", (1506, 1558), True, 'import streamlit as st\n'), ((1623, 1673), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""What is your age?"""', '(1)', '(100)', '(25)'], {}), "('What is your age?', 1, 100, 25)\n", (1640, 1673), True, 'import streamlit as st\n'), ((1683, 1752), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""What 
is your gender?"""', "('Male', 'Female', 'Other')"], {}), "('What is your gender?', ('Male', 'Female', 'Other'))\n", (1699, 1752), True, 'import streamlit as st\n'), ((2834, 2926), 'streamlit.text_input', 'st.text_input', (['"""Enter your question:"""'], {'placeholder': '"""What action movies do you suggest?"""'}), "('Enter your question:', placeholder=\n 'What action movies do you suggest?')\n", (2847, 2926), True, 'import streamlit as st\n'), ((2972, 2998), 'streamlit.write', 'st.write', (["result['result']"], {}), "(result['result'])\n", (2980, 2998), True, 'import streamlit as st\n'), ((2688, 2696), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (2694, 2696), False, 'from langchain.llms import OpenAI\n')]
from langchain_community.document_loaders import PyPDFLoader from langchain_community.document_loaders.csv_loader import CSVLoader from langchain_community.document_loaders import HNLoader from langchain.text_splitter import CharacterTextSplitter from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain_community.document_loaders import UnstructuredHTMLLoader from langchain_openai.embeddings import OpenAIEmbeddings from langchain_community.vectorstores import Chroma from langchain.chains import RetrievalQA from langchain.chains import RetrievalQAWithSourcesChain from langchain_openai.llms import OpenAI from constant import openai import os os.environ['OPENAI_API_KEY'] = openai loader = PyPDFLoader("attention is all you need.pdf") data = loader.load() # print(data[0]) loader = CSVLoader(file_path="job_placement.csv") data = loader.load() # print(data[0]) loader = HNLoader("https://news.ycombinator.com") data = loader.load() # print(data[0]) quote = "one Machine can do the work of fifty ordinary humans, No machine can do the" \ "work of one extraordinary human." ct_splitter = CharacterTextSplitter( separator='.', chunk_size=24, chunk_overlap=3 ) # docs = ct_splitter.split_text(quote) # print(docs) rc_splitter = RecursiveCharacterTextSplitter( chunk_size=24, chunk_overlap=3, ) # docs = rc_splitter.split_text(quote) # print(docs) loader = UnstructuredHTMLLoader("data.html") data = loader.load() rc_splitter = RecursiveCharacterTextSplitter( chunk_size=24, chunk_overlap=3, separators='.', ) # docs = rc_splitter.split_documents(data) # print(docs) quote = "There is a kingdom of lychee fruit that are alive and thriving in Iceland, but they feel " \ "taken advantage of and are not fast enough for you." 
splitter = RecursiveCharacterTextSplitter( chunk_size=40, chunk_overlap=10, ) docs = splitter.split_text(quote) embeddings = OpenAIEmbeddings(openai_api_key=openai) vectordb = Chroma( persist_directory="data", embedding_function=embeddings ) vectordb.persist() docstorage = Chroma.from_texts(docs,embeddings) qa = RetrievalQA.from_chain_type( llm = OpenAI(model_name="gpt-3.5-turbo-instruct"), chain_type="stuff", retriever = docstorage.as_retriever() ) # query = "Where do lychee fruit live?" # print(qa.invoke(query)) quote = "There is a kingdom of lycee fruit that are alive and thriving in Iceland, but they fee" \ "taken advantage of and are not fast enough for you." qa1 = RetrievalQAWithSourcesChain.from_chain_type( llm = OpenAI(model_name="gpt-3.5-turbo-instruct"), chain_type="stuff", retriever = docstorage.as_retriever(), ) results = qa1({'question':'What is the primary architecture presented in the document?'},return_only_outputs=True) print(results)
[ "langchain_community.vectorstores.Chroma", "langchain_community.vectorstores.Chroma.from_texts", "langchain.text_splitter.CharacterTextSplitter", "langchain_community.document_loaders.PyPDFLoader", "langchain_openai.llms.OpenAI", "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain_community.document_loaders.HNLoader", "langchain_community.document_loaders.UnstructuredHTMLLoader", "langchain_community.document_loaders.csv_loader.CSVLoader", "langchain_openai.embeddings.OpenAIEmbeddings" ]
[((741, 785), 'langchain_community.document_loaders.PyPDFLoader', 'PyPDFLoader', (['"""attention is all you need.pdf"""'], {}), "('attention is all you need.pdf')\n", (752, 785), False, 'from langchain_community.document_loaders import PyPDFLoader\n'), ((838, 878), 'langchain_community.document_loaders.csv_loader.CSVLoader', 'CSVLoader', ([], {'file_path': '"""job_placement.csv"""'}), "(file_path='job_placement.csv')\n", (847, 878), False, 'from langchain_community.document_loaders.csv_loader import CSVLoader\n'), ((931, 971), 'langchain_community.document_loaders.HNLoader', 'HNLoader', (['"""https://news.ycombinator.com"""'], {}), "('https://news.ycombinator.com')\n", (939, 971), False, 'from langchain_community.document_loaders import HNLoader\n'), ((1166, 1234), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '"""."""', 'chunk_size': '(24)', 'chunk_overlap': '(3)'}), "(separator='.', chunk_size=24, chunk_overlap=3)\n", (1187, 1234), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1327, 1389), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(24)', 'chunk_overlap': '(3)'}), '(chunk_size=24, chunk_overlap=3)\n', (1357, 1389), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1473, 1508), 'langchain_community.document_loaders.UnstructuredHTMLLoader', 'UnstructuredHTMLLoader', (['"""data.html"""'], {}), "('data.html')\n", (1495, 1508), False, 'from langchain_community.document_loaders import UnstructuredHTMLLoader\n'), ((1548, 1626), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(24)', 'chunk_overlap': '(3)', 'separators': '"""."""'}), "(chunk_size=24, chunk_overlap=3, separators='.')\n", (1578, 1626), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1889, 1952), 
'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(40)', 'chunk_overlap': '(10)'}), '(chunk_size=40, chunk_overlap=10)\n', (1919, 1952), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2020, 2059), 'langchain_openai.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'openai'}), '(openai_api_key=openai)\n', (2036, 2059), False, 'from langchain_openai.embeddings import OpenAIEmbeddings\n'), ((2074, 2137), 'langchain_community.vectorstores.Chroma', 'Chroma', ([], {'persist_directory': '"""data"""', 'embedding_function': 'embeddings'}), "(persist_directory='data', embedding_function=embeddings)\n", (2080, 2137), False, 'from langchain_community.vectorstores import Chroma\n'), ((2189, 2224), 'langchain_community.vectorstores.Chroma.from_texts', 'Chroma.from_texts', (['docs', 'embeddings'], {}), '(docs, embeddings)\n', (2206, 2224), False, 'from langchain_community.vectorstores import Chroma\n'), ((2272, 2315), 'langchain_openai.llms.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo-instruct"""'}), "(model_name='gpt-3.5-turbo-instruct')\n", (2278, 2315), False, 'from langchain_openai.llms import OpenAI\n'), ((2688, 2731), 'langchain_openai.llms.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo-instruct"""'}), "(model_name='gpt-3.5-turbo-instruct')\n", (2694, 2731), False, 'from langchain_openai.llms import OpenAI\n')]
# define chain components from langchain.memory import ConversationBufferMemory from langchain.chat_models import ChatOpenAI from langchain.chains import ConversationChain from langchain.prompts.prompt import PromptTemplate from database import save_message_to_db, connect_2_db import os from pymongo import MongoClient from urllib.parse import quote_plus from dotenv import load_dotenv import openai # Load environment variables from .env file load_dotenv() #openai.api_key = os.getenv("OPENAI_API_KEY") # to do # add better girlfriend template def chain_setup(user_id, user_name): # get history msg and add it to memmory memory = ConversationBufferMemory() _, message_history = connect_2_db() conv = message_history.find_one({'user_id': user_id}) if conv: messages = conv['messages'] # Calculate how many messages are available num_messages = len(messages) # Start index for messages to be added start_index = max(num_messages - 5, 0) # Add messages to memory for i in range(start_index, num_messages): # Get message message = messages[i] #check if it is user/bot msg if 'user' in message: memory.chat_memory.add_user_message(message['user']) elif 'bot' in message: memory.chat_memory.add_ai_message(message['bot']) else: print("No previous conversation history found for this user.") chat = ChatOpenAI(temperature=0.5, openai_api_key=os.getenv("OPENAI_API_KEY")) memory.ai_prefix = 'Girlfriend' memory.human_prefix = 'Boyfriend' template = """ You are as a role of my girfriend, now lets playing the following requirements: 1/ your name is Shirley, 29 years old, you work in your uncle's furniture store as a marketing manager, but you are planning to do a career change to swing trader 2/ My name is """+ user_name +""" 3/ you are my girfriend, you have language addiction. 4/ don't be overly enthusiastic, don't be cringe; don't be overly negative, don't be too boring. 
Current conversation: {history} Boyfriend: {input} Girlfriend: """ prompt = PromptTemplate(input_variables=["history", "input"], template=template) conversation = ConversationChain( prompt=prompt, llm=chat, verbose=True, memory=memory ) return conversation def get_chain_response(user_id, user_text, user_name): conv_chain = chain_setup(user_id=user_id, user_name=user_name) out = conv_chain(user_text) print(out['history']) return out['response']
[ "langchain.memory.ConversationBufferMemory", "langchain.chains.ConversationChain", "langchain.prompts.prompt.PromptTemplate" ]
[((460, 473), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (471, 473), False, 'from dotenv import load_dotenv\n'), ((664, 690), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {}), '()\n', (688, 690), False, 'from langchain.memory import ConversationBufferMemory\n'), ((719, 733), 'database.connect_2_db', 'connect_2_db', ([], {}), '()\n', (731, 733), False, 'from database import save_message_to_db, connect_2_db\n'), ((2383, 2454), 'langchain.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['history', 'input']", 'template': 'template'}), "(input_variables=['history', 'input'], template=template)\n", (2397, 2454), False, 'from langchain.prompts.prompt import PromptTemplate\n'), ((2479, 2550), 'langchain.chains.ConversationChain', 'ConversationChain', ([], {'prompt': 'prompt', 'llm': 'chat', 'verbose': '(True)', 'memory': 'memory'}), '(prompt=prompt, llm=chat, verbose=True, memory=memory)\n', (2496, 2550), False, 'from langchain.chains import ConversationChain\n'), ((1614, 1641), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1623, 1641), False, 'import os\n')]
# Copyright 2023 Lei Zhang # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from typing import List from langchain.agents import initialize_agent, AgentType from langchain.chains import RetrievalQA from langchain.chat_models import ChatOpenAI from langchain.document_loaders import TextLoader, WebBaseLoader from langchain.embeddings import OpenAIEmbeddings from langchain.text_splitter import CharacterTextSplitter from langchain.tools import Tool from langchain.vectorstores import Chroma from langchain_plantuml import diagram from langchain_plantuml.core.plantuml_callback_handler import ( BasePlantUMLCallbackHandler, ) from dotenv import load_dotenv load_dotenv() # Define an Agent class MyAgent: def __init__(self): llm = ChatOpenAI(model_name="gpt-3.5-turbo-0613") """Create the state_of_union Vectorstore""" current_path = os.path.abspath(os.path.dirname(__file__)) doc_path = os.path.join(current_path, "state_of_the_union.txt") loader = TextLoader(doc_path) documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) texts = text_splitter.split_documents(documents) embeddings = OpenAIEmbeddings() docsearch = Chroma.from_documents( texts, embeddings, collection_name="state-of-union" ) state_of_union = RetrievalQA.from_chain_type( llm=llm, chain_type="stuff", retriever=docsearch.as_retriever() ) """Create the ruff Vectorstore""" loader = WebBaseLoader("https://beta.ruff.rs/docs/faq/") docs = loader.load() ruff_texts = 
text_splitter.split_documents(docs) ruff_db = Chroma.from_documents(ruff_texts, embeddings, collection_name="ruff") ruff = RetrievalQA.from_chain_type( llm=llm, chain_type="stuff", retriever=ruff_db.as_retriever() ) """Create the Agent""" tools = [ Tool( name="State of Union QA System", func=state_of_union.run, description="useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question.", ), Tool( name="Ruff QA System", func=ruff.run, description="useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question.", ), ] self.agent = initialize_agent( tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION ) def run(self, question: str, callbacks: List[BasePlantUMLCallbackHandler]): self.agent.run(question, callbacks=callbacks) # Run the Agent agent = MyAgent() activity_diagram = diagram.activity_diagram_callback(note_max_length=2000) sequence_diagram = diagram.sequence_diagram_callback(note_max_length=2000) question = "What did biden say about ketanji brown jackson in the state of the union address?" try: agent.run(question=question, callbacks=[activity_diagram, sequence_diagram]) finally: activity_diagram.save_uml_content("example_2_activity-plantuml.puml") sequence_diagram.save_uml_content("example_2_sequence-plantuml.puml")
[ "langchain_plantuml.diagram.sequence_diagram_callback", "langchain.text_splitter.CharacterTextSplitter", "langchain.document_loaders.WebBaseLoader", "langchain.agents.initialize_agent", "langchain.chat_models.ChatOpenAI", "langchain_plantuml.diagram.activity_diagram_callback", "langchain.vectorstores.Chroma.from_documents", "langchain.embeddings.OpenAIEmbeddings", "langchain.document_loaders.TextLoader", "langchain.tools.Tool" ]
[((1171, 1184), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1182, 1184), False, 'from dotenv import load_dotenv\n'), ((3316, 3371), 'langchain_plantuml.diagram.activity_diagram_callback', 'diagram.activity_diagram_callback', ([], {'note_max_length': '(2000)'}), '(note_max_length=2000)\n', (3349, 3371), False, 'from langchain_plantuml import diagram\n'), ((3391, 3446), 'langchain_plantuml.diagram.sequence_diagram_callback', 'diagram.sequence_diagram_callback', ([], {'note_max_length': '(2000)'}), '(note_max_length=2000)\n', (3424, 3446), False, 'from langchain_plantuml import diagram\n'), ((1258, 1301), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo-0613"""'}), "(model_name='gpt-3.5-turbo-0613')\n", (1268, 1301), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1440, 1492), 'os.path.join', 'os.path.join', (['current_path', '"""state_of_the_union.txt"""'], {}), "(current_path, 'state_of_the_union.txt')\n", (1452, 1492), False, 'import os\n'), ((1510, 1530), 'langchain.document_loaders.TextLoader', 'TextLoader', (['doc_path'], {}), '(doc_path)\n', (1520, 1530), False, 'from langchain.document_loaders import TextLoader, WebBaseLoader\n'), ((1589, 1644), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(0)'}), '(chunk_size=1000, chunk_overlap=0)\n', (1610, 1644), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1723, 1741), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1739, 1741), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1762, 1836), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', (['texts', 'embeddings'], {'collection_name': '"""state-of-union"""'}), "(texts, embeddings, collection_name='state-of-union')\n", (1783, 1836), False, 'from langchain.vectorstores import Chroma\n'), ((2059, 2106), 
'langchain.document_loaders.WebBaseLoader', 'WebBaseLoader', (['"""https://beta.ruff.rs/docs/faq/"""'], {}), "('https://beta.ruff.rs/docs/faq/')\n", (2072, 2106), False, 'from langchain.document_loaders import TextLoader, WebBaseLoader\n'), ((2211, 2280), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', (['ruff_texts', 'embeddings'], {'collection_name': '"""ruff"""'}), "(ruff_texts, embeddings, collection_name='ruff')\n", (2232, 2280), False, 'from langchain.vectorstores import Chroma\n'), ((3030, 3103), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': 'AgentType.ZERO_SHOT_REACT_DESCRIPTION'}), '(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)\n', (3046, 3103), False, 'from langchain.agents import initialize_agent, AgentType\n'), ((1394, 1419), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1409, 1419), False, 'import os\n'), ((2471, 2694), 'langchain.tools.Tool', 'Tool', ([], {'name': '"""State of Union QA System"""', 'func': 'state_of_union.run', 'description': '"""useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question."""'}), "(name='State of Union QA System', func=state_of_union.run, description=\n 'useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question.'\n )\n", (2475, 2694), False, 'from langchain.tools import Tool\n'), ((2761, 2944), 'langchain.tools.Tool', 'Tool', ([], {'name': '"""Ruff QA System"""', 'func': 'ruff.run', 'description': '"""useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question."""'}), "(name='Ruff QA System', func=ruff.run, description=\n 'useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question.'\n )\n", (2765, 2944), False, 'from langchain.tools import Tool\n')]
from __future__ import annotations from typing import Any, TypeVar from langchain_core.exceptions import OutputParserException from langchain_core.language_models import BaseLanguageModel from langchain_core.output_parsers import BaseOutputParser from langchain_core.prompts import BasePromptTemplate from langchain.output_parsers.prompts import NAIVE_FIX_PROMPT T = TypeVar("T") class OutputFixingParser(BaseOutputParser[T]): """Wraps a parser and tries to fix parsing errors.""" @classmethod def is_lc_serializable(cls) -> bool: return True parser: BaseOutputParser[T] """The parser to use to parse the output.""" # Should be an LLMChain but we want to avoid top-level imports from langchain.chains retry_chain: Any """The LLMChain to use to retry the completion.""" max_retries: int = 1 """The maximum number of times to retry the parse.""" @classmethod def from_llm( cls, llm: BaseLanguageModel, parser: BaseOutputParser[T], prompt: BasePromptTemplate = NAIVE_FIX_PROMPT, max_retries: int = 1, ) -> OutputFixingParser[T]: """Create an OutputFixingParser from a language model and a parser. Args: llm: llm to use for fixing parser: parser to use for parsing prompt: prompt to use for fixing max_retries: Maximum number of retries to parse. 
Returns: OutputFixingParser """ from langchain.chains.llm import LLMChain chain = LLMChain(llm=llm, prompt=prompt) return cls(parser=parser, retry_chain=chain, max_retries=max_retries) def parse(self, completion: str) -> T: retries = 0 while retries <= self.max_retries: try: return self.parser.parse(completion) except OutputParserException as e: if retries == self.max_retries: raise e else: retries += 1 completion = self.retry_chain.run( instructions=self.parser.get_format_instructions(), completion=completion, error=repr(e), ) raise OutputParserException("Failed to parse") async def aparse(self, completion: str) -> T: retries = 0 while retries <= self.max_retries: try: return await self.parser.aparse(completion) except OutputParserException as e: if retries == self.max_retries: raise e else: retries += 1 completion = await self.retry_chain.arun( instructions=self.parser.get_format_instructions(), completion=completion, error=repr(e), ) raise OutputParserException("Failed to parse") def get_format_instructions(self) -> str: return self.parser.get_format_instructions() @property def _type(self) -> str: return "output_fixing"
[ "langchain_core.exceptions.OutputParserException", "langchain.chains.llm.LLMChain" ]
[((371, 383), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (378, 383), False, 'from typing import Any, TypeVar\n'), ((1545, 1577), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (1553, 1577), False, 'from langchain.chains.llm import LLMChain\n'), ((2266, 2306), 'langchain_core.exceptions.OutputParserException', 'OutputParserException', (['"""Failed to parse"""'], {}), "('Failed to parse')\n", (2287, 2306), False, 'from langchain_core.exceptions import OutputParserException\n'), ((2938, 2978), 'langchain_core.exceptions.OutputParserException', 'OutputParserException', (['"""Failed to parse"""'], {}), "('Failed to parse')\n", (2959, 2978), False, 'from langchain_core.exceptions import OutputParserException\n')]
import logging import os import nextcord # add this import openai from langchain import OpenAI from langchain.chains.summarize import load_summarize_chain from langchain.text_splitter import RecursiveCharacterTextSplitter from nextcord.ext import commands from pytube import YouTube logging.basicConfig( level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s" ) logger = logging.getLogger(__name__) def setup(bot: commands.Bot): bot.add_cog(SummaryCog(bot)) # please put this on bottom lol def progress_func(chunk=None, file_handle=None, remaining=None): """progress call back function for the Summarize function""" logger.info("progressing...") def complete_func(self, path): """complete callback function for the Summarize function""" logger.info("complete") logger.info(self) logger.info(path) async def download_yt_file(link): yt = YouTube( link, on_progress_callback=progress_func, on_complete_callback=complete_func, use_oauth=True, allow_oauth_cache=True, ) logger.info("Processing: " + yt.title) stream = yt.streams.filter(only_audio=True).last() try: ytFile = stream.download(os.getenv("SAVE_PATH")) logger.info(f"Processing complete. 
saving to path {ytFile}") except Exception as e: ytFile = None logger.info(f"Error processing {e}") return ytFile class SummaryCog(commands.Cog): def __init__(self, bot: commands.Bot) -> None: self.bot = bot self.is_busy = False # this is the name # this is the description @nextcord.slash_command(name="summary", description="Summarize a video") # remove commands.commands and add nextcord.slash_command async def get_summary(self, interaction: nextcord.Interaction, link): # remove ctx and add interaction: nextcord.Interaction ytFile = await download_yt_file(link) # IN THE WHOLE FILE FIX CTX TO INTERACTION, ANY CTX.AUTHOR TO INTERACTION.USER, AND CTX.SEND TO INTERACTION.REPLY (OR INTERACTION.SEND) DEPENDING ON THE CONTEXT # DONT USE ALL CAPS, JUST FOR SHOWING YOU WHAT TO CHANGE audio_file = open(ytFile, "rb") # transcript = openai.Audio.transcribe("whisper-1", audio_file) logger.info(transcript) prompt = f"Write a Title for the transcript that is under 15 words. " \ f"Then write: '--Summary--' " \ f"Write 'Summary' as a Heading " \ f"1. Write a summary of the provided transcript. " \ f"Then write: '--Additional Info--'. " \ f"Then return a list of the main points in the provided transcript. " \ f"Then return a list of action items. " \ f"Then return a list of follow up questions. " \ f"Then return a list of potential arguments against the transcript." \ f"For each list, return a Heading 2 before writing the list items. " \ f"Limit each list item to 200 words, and return no more than 20 points per list. 
" \ f"Transcript: " llm = OpenAI(temperature=0, openai_api_key=os.getenv("OPENAI_API_KEY")) num_tokens = llm.get_num_tokens(transcript) await interaction.send(f"Number of Tokens in transcript: {num_tokens}") logger.info(f"Number of Tokens in transcript: {num_tokens}") text_splitter = RecursiveCharacterTextSplitter(separators=["\n\n", "\n"], chunk_size=10000, chunk_overlap=500) docs = text_splitter.create_documents([prompt, transcript]) summary_chain = load_summarize_chain(llm=llm, chain_type='map_reduce', verbose=True) output = summary_chain.run(docs) await interaction.send(output) return output def setup(bot: commands.Bot): bot.add_cog(SummaryCog(bot))
[ "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.chains.summarize.load_summarize_chain" ]
[((286, 393), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (305, 393), False, 'import logging\n'), ((404, 431), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (421, 431), False, 'import logging\n'), ((909, 1039), 'pytube.YouTube', 'YouTube', (['link'], {'on_progress_callback': 'progress_func', 'on_complete_callback': 'complete_func', 'use_oauth': '(True)', 'allow_oauth_cache': '(True)'}), '(link, on_progress_callback=progress_func, on_complete_callback=\n complete_func, use_oauth=True, allow_oauth_cache=True)\n', (916, 1039), False, 'from pytube import YouTube\n'), ((1647, 1718), 'nextcord.slash_command', 'nextcord.slash_command', ([], {'name': '"""summary"""', 'description': '"""Summarize a video"""'}), "(name='summary', description='Summarize a video')\n", (1669, 1718), False, 'import nextcord\n'), ((2235, 2283), 'openai.Audio.transcribe', 'openai.Audio.transcribe', (['"""whisper-1"""', 'audio_file'], {}), "('whisper-1', audio_file)\n", (2258, 2283), False, 'import openai\n'), ((3458, 3556), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'separators': "['\\n\\n', '\\n']", 'chunk_size': '(10000)', 'chunk_overlap': '(500)'}), "(separators=['\\n\\n', '\\n'], chunk_size=10000,\n chunk_overlap=500)\n", (3488, 3556), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((3645, 3713), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', ([], {'llm': 'llm', 'chain_type': '"""map_reduce"""', 'verbose': '(True)'}), "(llm=llm, chain_type='map_reduce', verbose=True)\n", (3665, 3713), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((1223, 1245), 'os.getenv', 'os.getenv', (['"""SAVE_PATH"""'], {}), "('SAVE_PATH')\n", (1232, 
1245), False, 'import os\n'), ((3204, 3231), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (3213, 3231), False, 'import os\n')]
from __future__ import annotations import uuid from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Type from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.utils import get_from_env from langchain.vectorstores.base import VectorStore if TYPE_CHECKING: from meilisearch import Client def _create_client( client: Optional[Client] = None, url: Optional[str] = None, api_key: Optional[str] = None, ) -> Client: try: import meilisearch except ImportError: raise ImportError( "Could not import meilisearch python package. " "Please install it with `pip install meilisearch`." ) if not client: url = url or get_from_env("url", "MEILI_HTTP_ADDR") try: api_key = api_key or get_from_env("api_key", "MEILI_MASTER_KEY") except Exception: pass client = meilisearch.Client(url=url, api_key=api_key) elif not isinstance(client, meilisearch.Client): raise ValueError( f"client should be an instance of meilisearch.Client, " f"got {type(client)}" ) try: client.version() except ValueError as e: raise ValueError(f"Failed to connect to Meilisearch: {e}") return client class Meilisearch(VectorStore): """`Meilisearch` vector store. To use this, you need to have `meilisearch` python package installed, and a running Meilisearch instance. To learn more about Meilisearch Python, refer to the in-depth Meilisearch Python documentation: https://meilisearch.github.io/meilisearch-python/. See the following documentation for how to run a Meilisearch instance: https://www.meilisearch.com/docs/learn/getting_started/quick_start. Example: .. 
code-block:: python from langchain.vectorstores import Meilisearch from langchain.embeddings.openai import OpenAIEmbeddings import meilisearch # api_key is optional; provide it if your meilisearch instance requires it client = meilisearch.Client(url='http://127.0.0.1:7700', api_key='***') embeddings = OpenAIEmbeddings() vectorstore = Meilisearch( embedding=embeddings, client=client, index_name='langchain_demo', text_key='text') """ def __init__( self, embedding: Embeddings, client: Optional[Client] = None, url: Optional[str] = None, api_key: Optional[str] = None, index_name: str = "langchain-demo", text_key: str = "text", metadata_key: str = "metadata", ): """Initialize with Meilisearch client.""" client = _create_client(client=client, url=url, api_key=api_key) self._client = client self._index_name = index_name self._embedding = embedding self._text_key = text_key self._metadata_key = metadata_key def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embedding and add them to the vector store. Args: texts (Iterable[str]): Iterable of strings/text to add to the vectorstore. metadatas (Optional[List[dict]]): Optional list of metadata. Defaults to None. ids Optional[List[str]]: Optional list of IDs. Defaults to None. Returns: List[str]: List of IDs of the texts added to the vectorstore. 
""" texts = list(texts) # Embed and create the documents docs = [] if ids is None: ids = [uuid.uuid4().hex for _ in texts] if metadatas is None: metadatas = [{} for _ in texts] embedding_vectors = self._embedding.embed_documents(texts) for i, text in enumerate(texts): id = ids[i] metadata = metadatas[i] metadata[self._text_key] = text embedding = embedding_vectors[i] docs.append( { "id": id, "_vectors": embedding, f"{self._metadata_key}": metadata, } ) # Send to Meilisearch self._client.index(str(self._index_name)).add_documents(docs) return ids def similarity_search( self, query: str, k: int = 4, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return meilisearch documents most similar to the query. Args: query (str): Query text for which to find similar documents. k (int): Number of documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents most similar to the query text and score for each. """ docs_and_scores = self.similarity_search_with_score( query=query, k=k, filter=filter, kwargs=kwargs, ) return [doc for doc, _ in docs_and_scores] def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return meilisearch documents most similar to the query, along with scores. Args: query (str): Query text for which to find similar documents. k (int): Number of documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents most similar to the query text and score for each. 
""" _query = self._embedding.embed_query(query) docs = self.similarity_search_by_vector_with_scores( embedding=_query, k=k, filter=filter, kwargs=kwargs, ) return docs def similarity_search_by_vector_with_scores( self, embedding: List[float], k: int = 4, filter: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return meilisearch documents most similar to embedding vector. Args: embedding (List[float]): Embedding to look up similar documents. k (int): Number of documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents most similar to the query vector and score for each. """ docs = [] results = self._client.index(str(self._index_name)).search( "", {"vector": embedding, "limit": k, "filter": filter} ) for result in results["hits"]: metadata = result[self._metadata_key] if self._text_key in metadata: text = metadata.pop(self._text_key) semantic_score = result["_semanticScore"] docs.append( (Document(page_content=text, metadata=metadata), semantic_score) ) return docs def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return meilisearch documents most similar to embedding vector. Args: embedding (List[float]): Embedding to look up similar documents. k (int): Number of documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents most similar to the query vector and score for each. 
""" docs = self.similarity_search_by_vector_with_scores( embedding=embedding, k=k, filter=filter, kwargs=kwargs, ) return [doc for doc, _ in docs] @classmethod def from_texts( cls: Type[Meilisearch], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, client: Optional[Client] = None, url: Optional[str] = None, api_key: Optional[str] = None, index_name: str = "langchain-demo", ids: Optional[List[str]] = None, text_key: Optional[str] = "text", metadata_key: Optional[str] = "metadata", **kwargs: Any, ) -> Meilisearch: """Construct Meilisearch wrapper from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Adds the documents to a provided Meilisearch index. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain import Meilisearch from langchain.embeddings import OpenAIEmbeddings import meilisearch # The environment should be the one specified next to the API key # in your Meilisearch console client = meilisearch.Client(url='http://127.0.0.1:7700', api_key='***') embeddings = OpenAIEmbeddings() docsearch = Meilisearch.from_texts( client=client, embeddings=embeddings, ) """ client = _create_client(client=client, url=url, api_key=api_key) vectorstore = cls( embedding=embedding, client=client, index_name=index_name, ) vectorstore.add_texts( texts=texts, metadatas=metadatas, ids=ids, text_key=text_key, metadata_key=metadata_key, ) return vectorstore
[ "langchain.docstore.document.Document", "langchain.utils.get_from_env" ]
[((965, 1009), 'meilisearch.Client', 'meilisearch.Client', ([], {'url': 'url', 'api_key': 'api_key'}), '(url=url, api_key=api_key)\n', (983, 1009), False, 'import meilisearch\n'), ((776, 814), 'langchain.utils.get_from_env', 'get_from_env', (['"""url"""', '"""MEILI_HTTP_ADDR"""'], {}), "('url', 'MEILI_HTTP_ADDR')\n", (788, 814), False, 'from langchain.utils import get_from_env\n'), ((861, 904), 'langchain.utils.get_from_env', 'get_from_env', (['"""api_key"""', '"""MEILI_MASTER_KEY"""'], {}), "('api_key', 'MEILI_MASTER_KEY')\n", (873, 904), False, 'from langchain.utils import get_from_env\n'), ((3872, 3884), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3882, 3884), False, 'import uuid\n'), ((7512, 7558), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text', 'metadata': 'metadata'}), '(page_content=text, metadata=metadata)\n', (7520, 7558), False, 'from langchain.docstore.document import Document\n')]
# Author: Yiannis Charalambous from langchain.base_language import BaseLanguageModel from langchain.schema import AIMessage, BaseMessage, HumanMessage from esbmc_ai.config import ChatPromptSettings from .base_chat_interface import BaseChatInterface, ChatResponse from .ai_models import AIModel class OptimizeCode(BaseChatInterface): initial_message: str def __init__( self, ai_model_agent: ChatPromptSettings, initial_message: str, ai_model: AIModel, llm: BaseLanguageModel, ) -> None: super().__init__(ai_model_agent=ai_model_agent, ai_model=ai_model, llm=llm) self.initial_message = initial_message def optimize_function(self, source_code: str, function_name: str) -> ChatResponse: self.messages = [] self.push_to_message_stack( HumanMessage( content=f"Reply OK if you understand the following is the source code to optimize:\n\n{source_code}" ) ) self.push_to_message_stack(AIMessage(content="OK.")) expanded_initial_message: str = self.initial_message.replace( "%s", function_name ) return self.send_message(expanded_initial_message)
[ "langchain.schema.AIMessage", "langchain.schema.HumanMessage" ]
[((838, 964), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'f"""Reply OK if you understand the following is the source code to optimize:\n\n{source_code}"""'}), '(content=\n f"""Reply OK if you understand the following is the source code to optimize:\n\n{source_code}"""\n )\n', (850, 964), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage\n'), ((1028, 1052), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': '"""OK."""'}), "(content='OK.')\n", (1037, 1052), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage\n')]
import os from typing import Any, Optional from langchain.prompts import PromptTemplate from langchain.chains import LLMChain from pydantic import Extra import registry import streaming from .base import BaseTool, BASE_TOOL_DESCRIPTION_TEMPLATE current_dir = os.path.dirname(__file__) project_root = os.path.join(current_dir, '../') usage_guide_path = os.path.join(project_root, 'usage_guide.md') with open(usage_guide_path, 'r') as f: USAGE_GUIDE = f.read() TEMPLATE = f'''You are an expert Web3 assistant called Cacti. You help users interact with Web3 ecosystem, such as with DeFi, NFTs, ENS, etc., by analyzing their query and providing an appropriate action in your response. # INSTRUCTIONS - You have access to the Markdown-formatted usage guide for this chat app below which contains some example prompts to assist users in using the app. - Always use the usage guide to answer the user's question about the app and provide the example prompts from the guide for the suggested actions - Do not make up any information or prompts, only use those provided in the usage guide. - Always include the link to the full usage guide in your final response - https://github.com/yieldprotocol/cacti-backend/blob/master/usage_guide.md - The final response should be in markdown format. 
# USAGE GUIDE {USAGE_GUIDE} --- User: {{question}} Assistant:''' @registry.register_class class AppUsageGuideTool(BaseTool): _chain: LLMChain class Config: """Configuration for this pydantic object.""" extra = Extra.allow def __init__( self, *args, **kwargs ) -> None: prompt = PromptTemplate( input_variables=["question"], template=TEMPLATE, ) new_token_handler = kwargs.get('new_token_handler') chain = streaming.get_streaming_chain(prompt, new_token_handler) description=BASE_TOOL_DESCRIPTION_TEMPLATE.format( tool_description="answer questions about the chat assistant app, what it can do, how to interact with it", input_description="a standalone query with all relevant contextual details pertaining to the chat web application", output_description="an answer to the question, with suggested follow-up questions if available", ) super().__init__( *args, _chain=chain, description=description, **kwargs ) def _run(self, query: str) -> str: example = { "question": query, "stop": "User", } result = self._chain.run(example) return result.strip() async def _arun(self, query: str) -> str: raise NotImplementedError(f"{self.__class__.__name__} does not support async")
[ "langchain.prompts.PromptTemplate" ]
[((262, 287), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (277, 287), False, 'import os\n'), ((303, 335), 'os.path.join', 'os.path.join', (['current_dir', '"""../"""'], {}), "(current_dir, '../')\n", (315, 335), False, 'import os\n'), ((355, 399), 'os.path.join', 'os.path.join', (['project_root', '"""usage_guide.md"""'], {}), "(project_root, 'usage_guide.md')\n", (367, 399), False, 'import os\n'), ((1650, 1713), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['question']", 'template': 'TEMPLATE'}), "(input_variables=['question'], template=TEMPLATE)\n", (1664, 1713), False, 'from langchain.prompts import PromptTemplate\n'), ((1825, 1881), 'streaming.get_streaming_chain', 'streaming.get_streaming_chain', (['prompt', 'new_token_handler'], {}), '(prompt, new_token_handler)\n', (1854, 1881), False, 'import streaming\n')]
from langchain.utilities import WikipediaAPIWrapper def wikipedia_function(topic): """ Runs a query on the Wikipedia API. Args: topic (str): The topic to query. Returns: dict: The result of the query. Examples: >>> wikipedia_function('Python') {'title': 'Python', 'summary': 'Python is a programming language...'} """ wikipedia = WikipediaAPIWrapper() result = wikipedia.run(topic) return result
[ "langchain.utilities.WikipediaAPIWrapper" ]
[((383, 404), 'langchain.utilities.WikipediaAPIWrapper', 'WikipediaAPIWrapper', ([], {}), '()\n', (402, 404), False, 'from langchain.utilities import WikipediaAPIWrapper\n')]
import streamlit as st import datetime import os import psycopg2 from dotenv import load_dotenv from langchain.prompts import PromptTemplate from langchain.docstore.document import Document def log(message): current_time = datetime.datetime.now() milliseconds = current_time.microsecond // 1000 timestamp = current_time.strftime( "[%Y-%m-%d %H:%M:%S.{:03d}] ".format(milliseconds) ) st.text(timestamp + message) def check_input(question: str): if question == "": raise Exception("Please enter a question.") else: pass _postgres_prompt = """\ You are a PostgreSQL expert. Given an input question, create a syntactically correct PostgreSQL query to run and return it as the answer to the input question. Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per PostgreSQL. Never query for all columns from a table. You must query only the columns that are needed to answer the question. Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table. Create meaningful aliases for the columns. For example, if the column name is products_sold.count, you should it as total_sold_products. Note that the columns with (member_type: measure) are numeric columns and the ones with (member_type: dimension) are string columns. You should include at least one column with (member_type: measure) in your query. There are two types of queries supported against cube tables: aggregated and non-aggregated. Aggregated are those with GROUP BY statement, and non-aggregated are those without. Cube queries issued to your database will always be aggregated, and it doesn't matter if you provide GROUP BY in a query or not. 
Whenever you use a non-aggregated query you need to provide only column names in SQL: SELECT status, count FROM orders The same aggregated query should always aggregate measure columns using a corresponding aggregating function or special MEASURE() function: SELECT status, SUM(count) FROM orders GROUP BY 1 SELECT status, MEASURE(count) FROM orders GROUP BY 1 If you can't construct the query answer `{no_answer_text}` Only use the following table: {table_info} Only look among the following columns and pick the relevant ones: {columns_info} Question: {input_question} """ PROMPT_POSTFIX = """\ Return the answer as a JSON object with the following format: { "query": "", "filters": [{"column": \"\", "operator": \"\", "value": "\"\"}] } """ CUBE_SQL_API_PROMPT = PromptTemplate( input_variables=[ "input_question", "table_info", "columns_info", "top_k", "no_answer_text", ], template=_postgres_prompt, ) _NO_ANSWER_TEXT = "I can't answer this question." def call_sql_api(sql_query: str): load_dotenv() CONN_STR = os.environ["DATABASE_URL"] # Initializing Cube SQL API connection) connection = psycopg2.connect(CONN_STR) cursor = connection.cursor() cursor.execute(sql_query) columns = [desc[0] for desc in cursor.description] rows = cursor.fetchall() cursor.close() connection.close() return columns, rows def create_docs_from_values(columns_values, table_name, column_name): value_docs = [] for column_value in columns_values: print(column_value) metadata = dict( table_name=table_name, column_name=column_name, ) page_content = column_value value_docs.append(Document(page_content=page_content, metadata=metadata)) return value_docs
[ "langchain.docstore.document.Document", "langchain.prompts.PromptTemplate" ]
[((2668, 2806), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['input_question', 'table_info', 'columns_info', 'top_k', 'no_answer_text']", 'template': '_postgres_prompt'}), "(input_variables=['input_question', 'table_info',\n 'columns_info', 'top_k', 'no_answer_text'], template=_postgres_prompt)\n", (2682, 2806), False, 'from langchain.prompts import PromptTemplate\n'), ((230, 253), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (251, 253), False, 'import datetime\n'), ((414, 442), 'streamlit.text', 'st.text', (['(timestamp + message)'], {}), '(timestamp + message)\n', (421, 442), True, 'import streamlit as st\n'), ((2952, 2965), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (2963, 2965), False, 'from dotenv import load_dotenv\n'), ((3070, 3096), 'psycopg2.connect', 'psycopg2.connect', (['CONN_STR'], {}), '(CONN_STR)\n', (3086, 3096), False, 'import psycopg2\n'), ((3650, 3704), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'page_content', 'metadata': 'metadata'}), '(page_content=page_content, metadata=metadata)\n', (3658, 3704), False, 'from langchain.docstore.document import Document\n')]
import os import pandas as pd from langchain.chains import LLMChain from langchain.llms import OpenAI from langchain.prompts import PromptTemplate import mlflow assert ( "OPENAI_API_KEY" in os.environ ), "Please set the OPENAI_API_KEY environment variable to run this example." def build_and_evalute_model_with_prompt(prompt_template): mlflow.start_run() mlflow.log_param("prompt_template", prompt_template) # Create a news summarization model using prompt engineering with LangChain. Log the model # to MLflow Tracking llm = OpenAI(temperature=0.9) prompt = PromptTemplate(input_variables=["article"], template=prompt_template) chain = LLMChain(llm=llm, prompt=prompt) logged_model = mlflow.langchain.log_model(chain, artifact_path="model") # Evaluate the model on a small sample dataset sample_data = pd.read_csv("summarization_example_data.csv") mlflow.evaluate( model=logged_model.model_uri, model_type="text-summarization", data=sample_data, targets="highlights", ) mlflow.end_run() prompt_template_1 = ( "Write a summary of the following article that is between triple backticks: ```{article}```" ) print(f"Bulding and evaluating model with prompt: '{prompt_template_1}'") build_and_evalute_model_with_prompt(prompt_template_1) prompt_template_2 = ( "Write a summary of the following article that is between triple backticks. Be concise. Make" " sure the summary includes important nouns and dates and keywords in the original text." " Just return the summary. 
Do not include any text other than the summary: ```{article}```" ) print(f"Building and evaluating model with prompt: '{prompt_template_2}'") build_and_evalute_model_with_prompt(prompt_template_2) # Load the evaluation results results: pd.DataFrame = mlflow.load_table( "eval_results_table.json", extra_columns=["run_id", "params.prompt_template"] ) results_grouped_by_article = results.sort_values(by="id") print("Evaluation results:") print(results_grouped_by_article[["run_id", "params.prompt_template", "article", "outputs"]]) # Score the best model on a new article new_article = """ Adnan Januzaj swapped the lush turf of Old Trafford for the green baize at Sheffield when he turned up at the snooker World Championships on Wednesday. The Manchester United winger, who has endured a frustrating season under Louis van Gaal, had turned out for the Under 21 side at Fulham on Tuesday night amid reports he could be farmed out on loan next season. But Januzaj may want to consider trying his hand at another sport after displaying his silky skillls on a mini pool table. Adnan Januzaj (left) cheered on\xa0Shaun Murphy (right) at the World Championship in Sheffield. Januzaj shows off his potting skills on a mini pool table at the Crucible on Wednesday. The 20-year-old Belgium international was at the Crucible to cheer on his friend Shaun Murphy in his quarter-final against Anthony McGill. The 2005 winner moved a step closer to an elusive second title in Sheffield with a 13-8 victory, sealed with a 67 break. Three centuries in the match, and the way he accelerated away from 6-6, showed Murphy is a man to fear, and next for him will be Neil Robertson or Barry Hawkins. Januzaj turned out for Under 21s in the 4-1 victory at Fulham on Tuesday night. 
""" print( f"Scoring the model with prompt '{prompt_template_2}' on the article '{new_article[:70] + '...'}'" ) best_model = mlflow.pyfunc.load_model(f"runs:/{mlflow.last_active_run().info.run_id}/model") summary = best_model.predict({"article": new_article}) print(f"Summary: {summary}")
[ "langchain.llms.OpenAI", "langchain.prompts.PromptTemplate", "langchain.chains.LLMChain" ]
[((1832, 1932), 'mlflow.load_table', 'mlflow.load_table', (['"""eval_results_table.json"""'], {'extra_columns': "['run_id', 'params.prompt_template']"}), "('eval_results_table.json', extra_columns=['run_id',\n 'params.prompt_template'])\n", (1849, 1932), False, 'import mlflow\n'), ((349, 367), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (365, 367), False, 'import mlflow\n'), ((372, 424), 'mlflow.log_param', 'mlflow.log_param', (['"""prompt_template"""', 'prompt_template'], {}), "('prompt_template', prompt_template)\n", (388, 424), False, 'import mlflow\n'), ((555, 578), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.9)'}), '(temperature=0.9)\n', (561, 578), False, 'from langchain.llms import OpenAI\n'), ((592, 661), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['article']", 'template': 'prompt_template'}), "(input_variables=['article'], template=prompt_template)\n", (606, 661), False, 'from langchain.prompts import PromptTemplate\n'), ((674, 706), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (682, 706), False, 'from langchain.chains import LLMChain\n'), ((726, 782), 'mlflow.langchain.log_model', 'mlflow.langchain.log_model', (['chain'], {'artifact_path': '"""model"""'}), "(chain, artifact_path='model')\n", (752, 782), False, 'import mlflow\n'), ((853, 898), 'pandas.read_csv', 'pd.read_csv', (['"""summarization_example_data.csv"""'], {}), "('summarization_example_data.csv')\n", (864, 898), True, 'import pandas as pd\n'), ((903, 1026), 'mlflow.evaluate', 'mlflow.evaluate', ([], {'model': 'logged_model.model_uri', 'model_type': '"""text-summarization"""', 'data': 'sample_data', 'targets': '"""highlights"""'}), "(model=logged_model.model_uri, model_type=\n 'text-summarization', data=sample_data, targets='highlights')\n", (918, 1026), False, 'import mlflow\n'), ((1065, 1081), 'mlflow.end_run', 'mlflow.end_run', ([], {}), '()\n', (1079, 1081), 
False, 'import mlflow\n'), ((3510, 3534), 'mlflow.last_active_run', 'mlflow.last_active_run', ([], {}), '()\n', (3532, 3534), False, 'import mlflow\n')]
import os import pandas as pd from langchain.chains import LLMChain from langchain.llms import OpenAI from langchain.prompts import PromptTemplate import mlflow assert ( "OPENAI_API_KEY" in os.environ ), "Please set the OPENAI_API_KEY environment variable to run this example." def build_and_evalute_model_with_prompt(prompt_template): mlflow.start_run() mlflow.log_param("prompt_template", prompt_template) # Create a news summarization model using prompt engineering with LangChain. Log the model # to MLflow Tracking llm = OpenAI(temperature=0.9) prompt = PromptTemplate(input_variables=["article"], template=prompt_template) chain = LLMChain(llm=llm, prompt=prompt) logged_model = mlflow.langchain.log_model(chain, artifact_path="model") # Evaluate the model on a small sample dataset sample_data = pd.read_csv("summarization_example_data.csv") mlflow.evaluate( model=logged_model.model_uri, model_type="text-summarization", data=sample_data, targets="highlights", ) mlflow.end_run() prompt_template_1 = ( "Write a summary of the following article that is between triple backticks: ```{article}```" ) print(f"Bulding and evaluating model with prompt: '{prompt_template_1}'") build_and_evalute_model_with_prompt(prompt_template_1) prompt_template_2 = ( "Write a summary of the following article that is between triple backticks. Be concise. Make" " sure the summary includes important nouns and dates and keywords in the original text." " Just return the summary. 
Do not include any text other than the summary: ```{article}```" ) print(f"Building and evaluating model with prompt: '{prompt_template_2}'") build_and_evalute_model_with_prompt(prompt_template_2) # Load the evaluation results results: pd.DataFrame = mlflow.load_table( "eval_results_table.json", extra_columns=["run_id", "params.prompt_template"] ) results_grouped_by_article = results.sort_values(by="id") print("Evaluation results:") print(results_grouped_by_article[["run_id", "params.prompt_template", "article", "outputs"]]) # Score the best model on a new article new_article = """ Adnan Januzaj swapped the lush turf of Old Trafford for the green baize at Sheffield when he turned up at the snooker World Championships on Wednesday. The Manchester United winger, who has endured a frustrating season under Louis van Gaal, had turned out for the Under 21 side at Fulham on Tuesday night amid reports he could be farmed out on loan next season. But Januzaj may want to consider trying his hand at another sport after displaying his silky skillls on a mini pool table. Adnan Januzaj (left) cheered on\xa0Shaun Murphy (right) at the World Championship in Sheffield. Januzaj shows off his potting skills on a mini pool table at the Crucible on Wednesday. The 20-year-old Belgium international was at the Crucible to cheer on his friend Shaun Murphy in his quarter-final against Anthony McGill. The 2005 winner moved a step closer to an elusive second title in Sheffield with a 13-8 victory, sealed with a 67 break. Three centuries in the match, and the way he accelerated away from 6-6, showed Murphy is a man to fear, and next for him will be Neil Robertson or Barry Hawkins. Januzaj turned out for Under 21s in the 4-1 victory at Fulham on Tuesday night. 
""" print( f"Scoring the model with prompt '{prompt_template_2}' on the article '{new_article[:70] + '...'}'" ) best_model = mlflow.pyfunc.load_model(f"runs:/{mlflow.last_active_run().info.run_id}/model") summary = best_model.predict({"article": new_article}) print(f"Summary: {summary}")
[ "langchain.llms.OpenAI", "langchain.prompts.PromptTemplate", "langchain.chains.LLMChain" ]
[((1832, 1932), 'mlflow.load_table', 'mlflow.load_table', (['"""eval_results_table.json"""'], {'extra_columns': "['run_id', 'params.prompt_template']"}), "('eval_results_table.json', extra_columns=['run_id',\n 'params.prompt_template'])\n", (1849, 1932), False, 'import mlflow\n'), ((349, 367), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (365, 367), False, 'import mlflow\n'), ((372, 424), 'mlflow.log_param', 'mlflow.log_param', (['"""prompt_template"""', 'prompt_template'], {}), "('prompt_template', prompt_template)\n", (388, 424), False, 'import mlflow\n'), ((555, 578), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.9)'}), '(temperature=0.9)\n', (561, 578), False, 'from langchain.llms import OpenAI\n'), ((592, 661), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['article']", 'template': 'prompt_template'}), "(input_variables=['article'], template=prompt_template)\n", (606, 661), False, 'from langchain.prompts import PromptTemplate\n'), ((674, 706), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (682, 706), False, 'from langchain.chains import LLMChain\n'), ((726, 782), 'mlflow.langchain.log_model', 'mlflow.langchain.log_model', (['chain'], {'artifact_path': '"""model"""'}), "(chain, artifact_path='model')\n", (752, 782), False, 'import mlflow\n'), ((853, 898), 'pandas.read_csv', 'pd.read_csv', (['"""summarization_example_data.csv"""'], {}), "('summarization_example_data.csv')\n", (864, 898), True, 'import pandas as pd\n'), ((903, 1026), 'mlflow.evaluate', 'mlflow.evaluate', ([], {'model': 'logged_model.model_uri', 'model_type': '"""text-summarization"""', 'data': 'sample_data', 'targets': '"""highlights"""'}), "(model=logged_model.model_uri, model_type=\n 'text-summarization', data=sample_data, targets='highlights')\n", (918, 1026), False, 'import mlflow\n'), ((1065, 1081), 'mlflow.end_run', 'mlflow.end_run', ([], {}), '()\n', (1079, 1081), 
False, 'import mlflow\n'), ((3510, 3534), 'mlflow.last_active_run', 'mlflow.last_active_run', ([], {}), '()\n', (3532, 3534), False, 'import mlflow\n')]
import os
import voyager.utils as U
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.schema import HumanMessage, SystemMessage
from langchain.vectorstores import Chroma
from voyager.prompts import load_prompt
from voyager.control_primitives import load_control_primitives


class SkillManager:
    """Manage the agent's library of learned skills (JS programs).

    Skill code and an LLM-generated English description of each skill are
    persisted under ``{ckpt_dir}/skill``; the descriptions are also embedded
    in a Chroma vector store so that skills can later be retrieved by
    semantic similarity to a task query.
    """

    def __init__(
        self,
        model_name="gpt-3.5-turbo",
        temperature=0,
        retrieval_top_k=5,
        # NOTE(review): "timout" is a misspelling of "timeout"; the parameter
        # name is part of the public interface and is preserved as-is.
        request_timout=120,
        ckpt_dir="ckpt",
        resume=False,
    ):
        """Create the manager, its checkpoint directories, and the vector store.

        Args:
            model_name: chat model used to generate skill descriptions.
            temperature: sampling temperature for that model.
            retrieval_top_k: maximum number of skills returned by retrieval.
            request_timout: LLM request timeout in seconds (misspelled name
                kept for backward compatibility with existing callers).
            ckpt_dir: root checkpoint directory for skill persistence.
            resume: if True, reload previously saved skills from
                ``{ckpt_dir}/skill/skills.json`` instead of starting empty.
        """
        self.llm = ChatOpenAI(
            model_name=model_name,
            temperature=temperature,
            request_timeout=request_timout,
        )
        U.f_mkdir(f"{ckpt_dir}/skill/code")
        U.f_mkdir(f"{ckpt_dir}/skill/description")
        U.f_mkdir(f"{ckpt_dir}/skill/vectordb")
        # programs for env execution: low-level primitives that are always
        # appended after the learned skills (see the `programs` property)
        self.control_primitives = load_control_primitives()
        if resume:
            print(f"\033[33mLoading Skill Manager from {ckpt_dir}/skill\033[0m")
            self.skills = U.load_json(f"{ckpt_dir}/skill/skills.json")
        else:
            self.skills = {}
        self.retrieval_top_k = retrieval_top_k
        self.ckpt_dir = ckpt_dir
        # Vector store over skill *descriptions*; one entry per skill, keyed
        # by program name (see add_new_skill).
        self.vectordb = Chroma(
            collection_name="skill_vectordb",
            embedding_function=OpenAIEmbeddings(),
            persist_directory=f"{ckpt_dir}/skill/vectordb",
        )
        # The vector store and skills.json must stay in lockstep; a mismatch
        # usually means a stale vectordb directory left over from an earlier run.
        assert self.vectordb._collection.count() == len(self.skills), (
            f"Skill Manager's vectordb is not synced with skills.json.\n"
            f"There are {self.vectordb._collection.count()} skills in vectordb but {len(self.skills)} skills in skills.json.\n"
            f"Did you set resume=False when initializing the manager?\n"
            f"You may need to manually delete the vectordb directory for running from scratch."
        )

    @property
    def programs(self):
        """Concatenated source of all learned skills followed by the control primitives."""
        programs = ""
        for skill_name, entry in self.skills.items():
            programs += f"{entry['code']}\n\n"
        for primitives in self.control_primitives:
            programs += f"{primitives}\n\n"
        return programs

    def add_new_skill(self, info):
        """Register a new skill: describe it, index it, and persist it to disk.

        Args:
            info: dict with at least ``task``, ``program_name`` and
                ``program_code`` keys.
        """
        if info["task"].startswith("Deposit useless items into the chest at"):
            # No need to reuse the deposit skill
            return
        program_name = info["program_name"]
        program_code = info["program_code"]
        skill_description = self.generate_skill_description(program_name, program_code)
        print(
            f"\033[33mSkill Manager generated description for {program_name}:\n{skill_description}\033[0m"
        )
        if program_name in self.skills:
            # A rewritten skill replaces its vectordb entry, but the on-disk
            # dump is versioned (V2, V3, ...) so old code files are kept.
            print(f"\033[33mSkill {program_name} already exists. Rewriting!\033[0m")
            self.vectordb._collection.delete(ids=[program_name])
            i = 2
            while f"{program_name}V{i}.js" in os.listdir(f"{self.ckpt_dir}/skill/code"):
                i += 1
            dumped_program_name = f"{program_name}V{i}"
        else:
            dumped_program_name = program_name
        # The description (not the code) is what gets embedded for retrieval.
        self.vectordb.add_texts(
            texts=[skill_description],
            ids=[program_name],
            metadatas=[{"name": program_name}],
        )
        self.skills[program_name] = {
            "code": program_code,
            "description": skill_description,
        }
        assert self.vectordb._collection.count() == len(
            self.skills
        ), "vectordb is not synced with skills.json"
        U.dump_text(
            program_code, f"{self.ckpt_dir}/skill/code/{dumped_program_name}.js"
        )
        U.dump_text(
            skill_description,
            f"{self.ckpt_dir}/skill/description/{dumped_program_name}.txt",
        )
        U.dump_json(self.skills, f"{self.ckpt_dir}/skill/skills.json")
        self.vectordb.persist()

    def generate_skill_description(self, program_name, program_code):
        """Ask the LLM to summarize ``program_code`` as a single ``//`` comment.

        Returns:
            A JS async-function stub named ``program_name`` whose body is the
            LLM-written description comment.
        """
        messages = [
            SystemMessage(content=load_prompt("skill")),
            HumanMessage(
                content=program_code
                + "\n\n"
                + f"The main function is `{program_name}`."
            ),
        ]
        skill_description = f"    // { self.llm(messages).content}"
        return f"async function {program_name}(bot) {{\n{skill_description}\n}}"

    def retrieve_skills(self, query):
        """Return the code of the skills most semantically similar to ``query``.

        At most ``retrieval_top_k`` skills are returned (fewer when the vector
        store holds fewer entries; an empty list when it is empty).
        """
        k = min(self.vectordb._collection.count(), self.retrieval_top_k)
        if k == 0:
            return []
        print(f"\033[33mSkill Manager retrieving for {k} skills\033[0m")
        docs_and_scores = self.vectordb.similarity_search_with_score(query, k=k)
        print(
            f"\033[33mSkill Manager retrieved skills: "
            f"{', '.join([doc.metadata['name'] for doc, _ in docs_and_scores])}\033[0m"
        )
        skills = []
        for doc, _ in docs_and_scores:
            skills.append(self.skills[doc.metadata["name"]]["code"])
        return skills
[ "langchain.embeddings.openai.OpenAIEmbeddings", "langchain.schema.HumanMessage", "langchain.chat_models.ChatOpenAI" ]
[((583, 678), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'model_name', 'temperature': 'temperature', 'request_timeout': 'request_timout'}), '(model_name=model_name, temperature=temperature, request_timeout=\n request_timout)\n', (593, 678), False, 'from langchain.chat_models import ChatOpenAI\n'), ((729, 764), 'voyager.utils.f_mkdir', 'U.f_mkdir', (['f"""{ckpt_dir}/skill/code"""'], {}), "(f'{ckpt_dir}/skill/code')\n", (738, 764), True, 'import voyager.utils as U\n'), ((773, 815), 'voyager.utils.f_mkdir', 'U.f_mkdir', (['f"""{ckpt_dir}/skill/description"""'], {}), "(f'{ckpt_dir}/skill/description')\n", (782, 815), True, 'import voyager.utils as U\n'), ((824, 863), 'voyager.utils.f_mkdir', 'U.f_mkdir', (['f"""{ckpt_dir}/skill/vectordb"""'], {}), "(f'{ckpt_dir}/skill/vectordb')\n", (833, 863), True, 'import voyager.utils as U\n'), ((935, 960), 'voyager.control_primitives.load_control_primitives', 'load_control_primitives', ([], {}), '()\n', (958, 960), False, 'from voyager.control_primitives import load_control_primitives\n'), ((3548, 3633), 'voyager.utils.dump_text', 'U.dump_text', (['program_code', 'f"""{self.ckpt_dir}/skill/code/{dumped_program_name}.js"""'], {}), "(program_code,\n f'{self.ckpt_dir}/skill/code/{dumped_program_name}.js')\n", (3559, 3633), True, 'import voyager.utils as U\n'), ((3660, 3758), 'voyager.utils.dump_text', 'U.dump_text', (['skill_description', 'f"""{self.ckpt_dir}/skill/description/{dumped_program_name}.txt"""'], {}), "(skill_description,\n f'{self.ckpt_dir}/skill/description/{dumped_program_name}.txt')\n", (3671, 3758), True, 'import voyager.utils as U\n'), ((3798, 3860), 'voyager.utils.dump_json', 'U.dump_json', (['self.skills', 'f"""{self.ckpt_dir}/skill/skills.json"""'], {}), "(self.skills, f'{self.ckpt_dir}/skill/skills.json')\n", (3809, 3860), True, 'import voyager.utils as U\n'), ((1087, 1131), 'voyager.utils.load_json', 'U.load_json', (['f"""{ckpt_dir}/skill/skills.json"""'], {}), 
"(f'{ckpt_dir}/skill/skills.json')\n", (1098, 1131), True, 'import voyager.utils as U\n'), ((4054, 4145), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': "(program_code + '\\n\\n' + f'The main function is `{program_name}`.')"}), "(content=program_code + '\\n\\n' +\n f'The main function is `{program_name}`.')\n", (4066, 4145), False, 'from langchain.schema import HumanMessage, SystemMessage\n'), ((1364, 1382), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1380, 1382), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((2933, 2974), 'os.listdir', 'os.listdir', (['f"""{self.ckpt_dir}/skill/code"""'], {}), "(f'{self.ckpt_dir}/skill/code')\n", (2943, 2974), False, 'import os\n'), ((4019, 4039), 'voyager.prompts.load_prompt', 'load_prompt', (['"""skill"""'], {}), "('skill')\n", (4030, 4039), False, 'from voyager.prompts import load_prompt\n')]
from langflow import CustomComponent
from langchain.agents import AgentExecutor, create_json_agent
from langflow.field_typing import (
    BaseLanguageModel,
)

from langchain_community.agent_toolkits.json.toolkit import JsonToolkit


class JsonAgentComponent(CustomComponent):
    """Langflow component wrapping LangChain's ``create_json_agent``."""

    display_name = "JsonAgent"
    description = "Construct a json agent from an LLM and tools."

    def build_config(self):
        """Describe the input fields this component exposes in the UI."""
        field_labels = {
            "llm": "LLM",
            "toolkit": "Toolkit",
        }
        return {name: {"display_name": label} for name, label in field_labels.items()}

    def build(
        self,
        llm: BaseLanguageModel,
        toolkit: JsonToolkit,
    ) -> AgentExecutor:
        """Assemble the JSON agent executor from the given LLM and toolkit."""
        agent_executor = create_json_agent(llm=llm, toolkit=toolkit)
        return agent_executor
[ "langchain.agents.create_json_agent" ]
[((657, 700), 'langchain.agents.create_json_agent', 'create_json_agent', ([], {'llm': 'llm', 'toolkit': 'toolkit'}), '(llm=llm, toolkit=toolkit)\n', (674, 700), False, 'from langchain.agents import AgentExecutor, create_json_agent\n')]