Refactor import statements for ChatMessageHistory in model.py and space.py
Files changed:
- kitt/core/model.py +0 -1
- requirements.txt +1 -0
- space.py +1 -1
kitt/core/model.py

@@ -6,7 +6,6 @@ import xml.etree.ElementTree as ET
 from enum import Enum
 from typing import List
 
-from langchain.memory import ChatMessageHistory
 from langchain.tools.base import StructuredTool
 from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
 from langchain_core.utils.function_calling import convert_to_openai_tool
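The imports kept in model.py pair StructuredTool with convert_to_openai_tool. As a minimal sketch of that pattern only: the tool name and function below are hypothetical illustrations, not code from this repository.

from langchain.tools.base import StructuredTool
from langchain_core.utils.function_calling import convert_to_openai_tool


def get_weather(city: str) -> str:
    """Return a short weather report for a city."""
    return f"Sunny in {city}"


# Wrap the plain function as a LangChain StructuredTool, then convert it
# to the OpenAI tool/function-calling schema expected by chat models.
# (Illustrative example; not taken from kitt/core/model.py.)
weather_tool = StructuredTool.from_function(func=get_weather)
openai_tool = convert_to_openai_tool(weather_tool)
print(openai_tool["function"]["name"])  # -> "get_weather"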
requirements.txt

@@ -5,6 +5,7 @@ matplotlib
 wurlitzer
 accelerate
 bitsandbytes
+https://github.com/Dao-AILab/flash-attention/releases/download/v2.5.9.post1/flash_attn-2.5.9.post1+cu118torch1.12cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
 optimum
 # auto-gptq
 gradio
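The added wheel pins flash-attn 2.5.9.post1 built for CUDA 11.8, CPython 3.10, linux x86_64. Assuming the Space loads its model through Hugging Face transformers (an assumption, not shown in this diff), FlashAttention-2 is typically enabled like this; the model id is a placeholder:

import torch
from transformers import AutoModelForCausalLM

# Hypothetical checkpoint id; the model actually used by the Space is not part of this diff.
model = AutoModelForCausalLM.from_pretrained(
    "your-org/your-model",
    torch_dtype=torch.float16,
    attn_implementation="flash_attention_2",  # requires the flash-attn wheel pinned above
    device_map="auto",  # uses accelerate, which is already in requirements.txt
)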
space.py

@@ -1,5 +1,5 @@
 import gradio as gr
-from
+from langchain_community.chat_message_histories import ChatMessageHistory
 from langchain.tools import tool
 from langchain_core.utils.function_calling import convert_to_openai_tool
 from loguru import logger
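ChatMessageHistory is now imported from langchain_community rather than langchain.memory. A minimal usage sketch of the community class follows; the messages are illustrative and not taken from space.py:

from langchain_community.chat_message_histories import ChatMessageHistory

# In-memory chat history; example messages are hypothetical.
history = ChatMessageHistory()
history.add_user_message("Turn on the headlights.")
history.add_ai_message("Headlights are on.")

# history.messages is a list of HumanMessage / AIMessage objects.
for message in history.messages:
    print(type(message).__name__, message.content)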