Issues in RetrievalQA

#1
by amouchet - opened

Hi!

I am encountering an issue when using the .gguf file with llama.cpp in a RetrievalQA chain: I get the error 'OSError: [WinError -529697949] Windows Error 0xe06d7363'.

This is very weird, as the model otherwise works fine in a simple LLamaChain. If anyone has an idea of what the issue might be, it would be greatly appreciated!
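For context, here is roughly how the chain is set up (a minimal sketch: the model path, embedding model, and documents below are placeholders rather than the exact ones I use, but the structure matches the traceback, i.e. LlamaCpp with streaming enabled feeding a RetrievalQA "stuff" chain):

```python
# Minimal sketch of the failing setup (placeholder model path, embeddings and
# documents; the real ones differ, but the chain wiring is the same).
from langchain_community.llms import LlamaCpp
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.chains import RetrievalQA

# Load the .gguf model through llama-cpp-python.
llm = LlamaCpp(
    model_path="path/to/model.gguf",  # placeholder path
    n_ctx=2048,
    streaming=True,
)

# Build a small vector store over the source documents (placeholder text).
embeddings = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-MiniLM-L6-v2"
)
vectorstore = FAISS.from_texts(["example document text"], embeddings)

# Wire the retriever and the LLM into a "stuff" RetrievalQA chain.
qa = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=vectorstore.as_retriever(),
)

query = "Quel est un motif d'exclusion ?"
qa.run(query)  # this is the call that raises the OSError below
```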

The full error message is:

OSError Traceback (most recent call last)
Cell In[11], line 3
1 query = "Quel est un motif d'exclusion ?"
----> 3 qa.run(query)

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain_core\_api\deprecation.py:142, in deprecated.<locals>.deprecate.<locals>.warning_emitting_wrapper(*args, **kwargs)
140 warned = True
141 emit_warning()
--> 142 return wrapped(*args, **kwargs)

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain\chains\base.py:538, in Chain.run(self, callbacks, tags, metadata, *args, **kwargs)
536 if len(args) != 1:
537 raise ValueError("run supports only one positional argument.")
--> 538 return self(args[0], callbacks=callbacks, tags=tags, metadata=metadata)[
539 _output_key
540 ]
542 if kwargs and not args:
543 return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[
544 _output_key
545 ]

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain_core\_api\deprecation.py:142, in deprecated.<locals>.deprecate.<locals>.warning_emitting_wrapper(*args, **kwargs)
140 warned = True
141 emit_warning()
--> 142 return wrapped(*args, **kwargs)

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain\chains\base.py:363, in Chain.__call__(self, inputs, return_only_outputs, callbacks, tags, metadata, run_name, include_run_info)
331 """Execute the chain.
332
333 Args:
(...)
354 Chain.output_keys.
355 """
356 config = {
357 "callbacks": callbacks,
358 "tags": tags,
359 "metadata": metadata,
360 "run_name": run_name,
361 }
--> 363 return self.invoke(
364 inputs,
365 cast(RunnableConfig, {k: v for k, v in config.items() if v is not None}),
366 return_only_outputs=return_only_outputs,
367 include_run_info=include_run_info,
368 )

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain\chains\base.py:162, in Chain.invoke(self, input, config, **kwargs)
160 except BaseException as e:
161 run_manager.on_chain_error(e)
--> 162 raise e
163 run_manager.on_chain_end(outputs)
164 final_outputs: Dict[str, Any] = self.prep_outputs(
165 inputs, outputs, return_only_outputs
166 )

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain\chains\base.py:156, in Chain.invoke(self, input, config, **kwargs)
149 run_manager = callback_manager.on_chain_start(
150 dumpd(self),
151 inputs,
152 name=run_name,
153 )
154 try:
155 outputs = (
--> 156 self._call(inputs, run_manager=run_manager)
157 if new_arg_supported
158 else self._call(inputs)
159 )
160 except BaseException as e:
161 run_manager.on_chain_error(e)

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain\chains\retrieval_qa\base.py:144, in BaseRetrievalQA._call(self, inputs, run_manager)
142 else:
143 docs = self._get_docs(question) # type: ignore[call-arg]
--> 144 answer = self.combine_documents_chain.run(
145 input_documents=docs, question=question, callbacks=_run_manager.get_child()
146 )
148 if self.return_source_documents:
149 return {self.output_key: answer, "source_documents": docs}

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain_core\_api\deprecation.py:142, in deprecated.<locals>.deprecate.<locals>.warning_emitting_wrapper(*args, **kwargs)
140 warned = True
141 emit_warning()
--> 142 return wrapped(*args, **kwargs)

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain\chains\base.py:543, in Chain.run(self, callbacks, tags, metadata, *args, **kwargs)
538 return self(args[0], callbacks=callbacks, tags=tags, metadata=metadata)[
539 _output_key
540 ]
542 if kwargs and not args:
--> 543 return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[
544 _output_key
545 ]
547 if not kwargs and not args:
548 raise ValueError(
549 "run supported with either positional arguments or keyword arguments,"
550 " but none were provided."
551 )

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain_core\_api\deprecation.py:142, in deprecated.<locals>.deprecate.<locals>.warning_emitting_wrapper(*args, **kwargs)
140 warned = True
141 emit_warning()
--> 142 return wrapped(*args, **kwargs)

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain\chains\base.py:363, in Chain.__call__(self, inputs, return_only_outputs, callbacks, tags, metadata, run_name, include_run_info)
331 """Execute the chain.
332
333 Args:
(...)
354 Chain.output_keys.
355 """
356 config = {
357 "callbacks": callbacks,
358 "tags": tags,
359 "metadata": metadata,
360 "run_name": run_name,
361 }
--> 363 return self.invoke(
364 inputs,
365 cast(RunnableConfig, {k: v for k, v in config.items() if v is not None}),
366 return_only_outputs=return_only_outputs,
367 include_run_info=include_run_info,
368 )

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain\chains\base.py:162, in Chain.invoke(self, input, config, **kwargs)
160 except BaseException as e:
161 run_manager.on_chain_error(e)
--> 162 raise e
163 run_manager.on_chain_end(outputs)
164 final_outputs: Dict[str, Any] = self.prep_outputs(
165 inputs, outputs, return_only_outputs
166 )

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain\chains\base.py:156, in Chain.invoke(self, input, config, **kwargs)
149 run_manager = callback_manager.on_chain_start(
150 dumpd(self),
151 inputs,
152 name=run_name,
153 )
154 try:
155 outputs = (
--> 156 self._call(inputs, run_manager=run_manager)
157 if new_arg_supported
158 else self._call(inputs)
159 )
160 except BaseException as e:
161 run_manager.on_chain_error(e)

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain\chains\combine_documents\base.py:136, in BaseCombineDocumentsChain._call(self, inputs, run_manager)
134 # Other keys are assumed to be needed for LLM prediction
135 other_keys = {k: v for k, v in inputs.items() if k != self.input_key}
--> 136 output, extra_return_dict = self.combine_docs(
137 docs, callbacks=_run_manager.get_child(), **other_keys
138 )
139 extra_return_dict[self.output_key] = output
140 return extra_return_dict

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain\chains\combine_documents\stuff.py:244, in StuffDocumentsChain.combine_docs(self, docs, callbacks, **kwargs)
242 inputs = self._get_inputs(docs, **kwargs)
243 # Call predict on the LLM.
--> 244 return self.llm_chain.predict(callbacks=callbacks, **inputs), {}

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain\chains\llm.py:293, in LLMChain.predict(self, callbacks, **kwargs)
278 def predict(self, callbacks: Callbacks = None, **kwargs: Any) -> str:
279 """Format prompt with kwargs and pass to LLM.
280
281 Args:
(...)
291 completion = llm.predict(adjective="funny")
292 """
--> 293 return self(kwargs, callbacks=callbacks)[self.output_key]

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain_core\_api\deprecation.py:142, in deprecated.<locals>.deprecate.<locals>.warning_emitting_wrapper(*args, **kwargs)
140 warned = True
141 emit_warning()
--> 142 return wrapped(*args, **kwargs)

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain\chains\base.py:363, in Chain.__call__(self, inputs, return_only_outputs, callbacks, tags, metadata, run_name, include_run_info)
331 """Execute the chain.
332
333 Args:
(...)
354 Chain.output_keys.
355 """
356 config = {
357 "callbacks": callbacks,
358 "tags": tags,
359 "metadata": metadata,
360 "run_name": run_name,
361 }
--> 363 return self.invoke(
364 inputs,
365 cast(RunnableConfig, {k: v for k, v in config.items() if v is not None}),
366 return_only_outputs=return_only_outputs,
367 include_run_info=include_run_info,
368 )

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain\chains\base.py:162, in Chain.invoke(self, input, config, **kwargs)
160 except BaseException as e:
161 run_manager.on_chain_error(e)
--> 162 raise e
163 run_manager.on_chain_end(outputs)
164 final_outputs: Dict[str, Any] = self.prep_outputs(
165 inputs, outputs, return_only_outputs
166 )

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain\chains\base.py:156, in Chain.invoke(self, input, config, **kwargs)
149 run_manager = callback_manager.on_chain_start(
150 dumpd(self),
151 inputs,
152 name=run_name,
153 )
154 try:
155 outputs = (
--> 156 self._call(inputs, run_manager=run_manager)
157 if new_arg_supported
158 else self._call(inputs)
159 )
160 except BaseException as e:
161 run_manager.on_chain_error(e)

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain\chains\llm.py:103, in LLMChain._call(self, inputs, run_manager)
98 def _call(
99 self,
100 inputs: Dict[str, Any],
101 run_manager: Optional[CallbackManagerForChainRun] = None,
102 ) -> Dict[str, str]:
--> 103 response = self.generate([inputs], run_manager=run_manager)
104 return self.create_outputs(response)[0]

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain\chains\llm.py:115, in LLMChain.generate(self, input_list, run_manager)
113 callbacks = run_manager.get_child() if run_manager else None
114 if isinstance(self.llm, BaseLanguageModel):
--> 115 return self.llm.generate_prompt(
116 prompts,
117 stop,
118 callbacks=callbacks,
119 **self.llm_kwargs,
120 )
121 else:
122 results = self.llm.bind(stop=stop, **self.llm_kwargs).batch(
123 cast(List, prompts), {"callbacks": callbacks}
124 )

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain_core\language_models\llms.py:525, in BaseLLM.generate_prompt(self, prompts, stop, callbacks, **kwargs)
517 def generate_prompt(
518 self,
519 prompts: List[PromptValue],
(...)
522 **kwargs: Any,
523 ) -> LLMResult:
524 prompt_strings = [p.to_string() for p in prompts]
--> 525 return self.generate(prompt_strings, stop=stop, callbacks=callbacks, **kwargs)

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain_core\language_models\llms.py:698, in BaseLLM.generate(self, prompts, stop, callbacks, tags, metadata, run_name, **kwargs)
682 raise ValueError(
683 "Asked to cache, but no cache found at langchain.cache."
684 )
685 run_managers = [
686 callback_manager.on_llm_start(
687 dumpd(self),
(...)
696 )
697 ]
--> 698 output = self._generate_helper(
699 prompts, stop, run_managers, bool(new_arg_supported), **kwargs
700 )
701 return output
702 if len(missing_prompts) > 0:

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain_core\language_models\llms.py:562, in BaseLLM._generate_helper(self, prompts, stop, run_managers, new_arg_supported, **kwargs)
560 for run_manager in run_managers:
561 run_manager.on_llm_error(e, response=LLMResult(generations=[]))
--> 562 raise e
563 flattened_outputs = output.flatten()
564 for manager, flattened_output in zip(run_managers, flattened_outputs):

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain_core\language_models\llms.py:549, in BaseLLM._generate_helper(self, prompts, stop, run_managers, new_arg_supported, **kwargs)
539 def _generate_helper(
540 self,
541 prompts: List[str],
(...)
545 **kwargs: Any,
546 ) -> LLMResult:
547 try:
548 output = (
--> 549 self._generate(
550 prompts,
551 stop=stop,
552 # TODO: support multiple run managers
553 run_manager=run_managers[0] if run_managers else None,
554 **kwargs,
555 )
556 if new_arg_supported
557 else self._generate(prompts, stop=stop)
558 )
559 except BaseException as e:
560 for run_manager in run_managers:

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain_core\language_models\llms.py:1134, in LLM._generate(self, prompts, stop, run_manager, **kwargs)
1131 new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")
1132 for prompt in prompts:
1133 text = (
-> 1134 self._call(prompt, stop=stop, run_manager=run_manager, **kwargs)
1135 if new_arg_supported
1136 else self._call(prompt, stop=stop, **kwargs)
1137 )
1138 generations.append([Generation(text=text)])
1139 return LLMResult(generations=generations)

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain_community\llms\llamacpp.py:291, in LlamaCpp._call(self, prompt, stop, run_manager, **kwargs)
286 if self.streaming:
287 # If streaming is enabled, we use the stream
288 # method that yields as they are generated
289 # and return the combined strings from the first choices's text:
290 combined_text_output = ""
--> 291 for chunk in self._stream(
292 prompt=prompt,
293 stop=stop,
294 run_manager=run_manager,
295 **kwargs,
296 ):
297 combined_text_output += chunk.text
298 return combined_text_output

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain_community\llms\llamacpp.py:344, in LlamaCpp._stream(self, prompt, stop, run_manager, **kwargs)
342 params = {**self._get_parameters(stop), **kwargs}
343 result = self.client(prompt=prompt, stream=True, **params)
--> 344 for part in result:
345 logprobs = part["choices"][0].get("logprobs", None)
346 chunk = GenerationChunk(
347 text=part["choices"][0]["text"],
348 generation_info={"logprobs": logprobs},
349 )

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\llama_cpp\llama.py:1386, in Llama._create_completion(self, prompt, suffix, max_tokens, temperature, top_p, min_p, typical_p, logprobs, echo, stop, frequency_penalty, presence_penalty, repeat_penalty, top_k, stream, seed, tfs_z, mirostat_mode, mirostat_tau, mirostat_eta, model, stopping_criteria, logits_processor, grammar, logit_bias)
1382 completion_tokens: List[int] = [] if len(prompt) > 0 else [self.token_bos()]
1383 # Add blank space to start of prompt to match OG llama tokenizer
1384 prompt_tokens: List[int] = (
1385 (
-> 1386 self.tokenize(prompt.encode("utf-8"), special=True)
1387 if prompt != ""
1388 else [self.token_bos()]
1389 )
1390 if isinstance(prompt, str)
1391 else prompt
1392 )
1393 text: bytes = b""
1394 returned_tokens: int = 0

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\llama_cpp\llama.py:1019, in Llama.tokenize(self, text, add_bos, special)
1005 def tokenize(
1006 self, text: bytes, add_bos: bool = True, special: bool = False
1007 ) -> List[int]:
1008 """Tokenize a string.
1009
1010 Args:
(...)
1017 A list of tokens.
1018 """
-> 1019 return self._model.tokenize(text, add_bos, special)

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\llama_cpp\llama.py:353, in _LlamaModel.tokenize(self, text, add_bos, special)
351 n_ctx = self.n_ctx_train()
352 tokens = (llama_cpp.llama_token * n_ctx)()
--> 353 n_tokens = llama_cpp.llama_tokenize(
354 self.model, text, len(text), tokens, n_ctx, add_bos, special
355 )
356 if n_tokens < 0:
357 n_tokens = abs(n_tokens)

File c:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\llama_cpp\llama_cpp.py:1733, in llama_tokenize(model, text, text_len, tokens, n_max_tokens, add_bos, special)
1723 def llama_tokenize(
1724 model: llama_model_p,
1725 text: bytes,
(...)
1730 special: Union[c_bool, bool],
1731 ) -> int:
1732 """Convert the provided text into tokens."""
-> 1733 return _lib.llama_tokenize(
1734 model, text, text_len, tokens, n_max_tokens, add_bos, special
1735 )

OSError: [WinError -529697949] Windows Error 0xe06d7363
