repo_id
stringclasses 20
values | file_path
stringlengths 44
119
| content
stringlengths 460
28.1k
| __index_level_0__
int64 0
0
| index
int64 90
4.01k
| secrets
stringlengths 68
2.61k
| has_secrets
bool 1
class | number_secrets
int64 1
28
| new_content
stringlengths 454
28.4k
| modified
bool 1
class | references
stringlengths 469
28.8k
|
---|---|---|---|---|---|---|---|---|---|---|
hf_public_repos/langchain-ai/langchain/libs/langchain/tests/unit_tests | hf_public_repos/langchain-ai/langchain/libs/langchain/tests/unit_tests/document_loaders/test_confluence.py | import unittest
from typing import Dict
from unittest.mock import MagicMock, patch
import pytest
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.confluence import ConfluenceLoader, ContentFormat
@pytest.fixture
def mock_confluence(): # type: ignore
with patch("atlassian.Confluence") as mock_confluence:
yield mock_confluence
@pytest.mark.requires("atlassian", "bs4", "lxml")
class TestConfluenceLoader:
CONFLUENCE_URL = "https://example.atlassian.com/wiki"
MOCK_USERNAME = "user@gmail.com"
MOCK_API_TOKEN = "api_token"
MOCK_SPACE_KEY = "spaceId123"
def test_confluence_loader_initialization(self, mock_confluence: MagicMock) -> None:
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
)
mock_confluence.assert_called_once_with(
url=self.CONFLUENCE_URL,
username="user@gmail.com",
password="api_token",
cloud=True,
)
def test_confluence_loader_initialization_invalid(self) -> None:
with pytest.raises(ValueError):
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
token="foo",
)
with pytest.raises(ValueError):
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
oauth2={
"access_token": "bar",
"access_token_secret": "bar",
"consumer_key": "bar",
"key_cert": "bar",
},
)
with pytest.raises(ValueError):
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
session=requests.Session(),
)
def test_confluence_loader_initialization_from_env(
self, mock_confluence: MagicMock
) -> None:
with unittest.mock.patch.dict(
"os.environ",
{
"CONFLUENCE_USERNAME": self.MOCK_USERNAME,
"CONFLUENCE_API_TOKEN": self.MOCK_API_TOKEN,
},
):
ConfluenceLoader(url=self.CONFLUENCE_URL)
mock_confluence.assert_called_with(
url=self.CONFLUENCE_URL, username=None, password=None, cloud=True
)
def test_confluence_loader_load_data_invalid_args(self) -> None:
confluence_loader = ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
)
with pytest.raises(
ValueError,
match="Must specify at least one among `space_key`, `page_ids`, `label`, `cql` parameters.", # noqa: E501
):
confluence_loader.load()
def test_confluence_loader_load_data_by_page_ids(
self, mock_confluence: MagicMock
) -> None:
mock_confluence.get_page_by_id.side_effect = [
self._get_mock_page("123"),
self._get_mock_page("456"),
]
mock_confluence.get_all_restrictions_for_content.side_effect = [
self._get_mock_page_restrictions("123"),
self._get_mock_page_restrictions("456"),
]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
mock_page_ids = ["123", "456"]
documents = confluence_loader.load(page_ids=mock_page_ids)
assert mock_confluence.get_page_by_id.call_count == 2
assert mock_confluence.get_all_restrictions_for_content.call_count == 2
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == "Content 123"
assert documents[1].page_content == "Content 456"
assert mock_confluence.get_all_pages_from_space.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
def test_confluence_loader_load_data_by_space_id(
self, mock_confluence: MagicMock
) -> None:
# one response with two pages
mock_confluence.get_all_pages_from_space.return_value = [
self._get_mock_page("123"),
self._get_mock_page("456"),
]
mock_confluence.get_all_restrictions_for_content.side_effect = [
self._get_mock_page_restrictions("123"),
self._get_mock_page_restrictions("456"),
]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
documents = confluence_loader.load(space_key=self.MOCK_SPACE_KEY, max_pages=2)
assert mock_confluence.get_all_pages_from_space.call_count == 1
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == "Content 123"
assert documents[1].page_content == "Content 456"
assert mock_confluence.get_page_by_id.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
def test_confluence_loader_when_content_format_and_keep_markdown_format_enabled(
self, mock_confluence: MagicMock
) -> None:
# one response with two pages
mock_confluence.get_all_pages_from_space.return_value = [
self._get_mock_page("123", ContentFormat.VIEW),
self._get_mock_page("456", ContentFormat.VIEW),
]
mock_confluence.get_all_restrictions_for_content.side_effect = [
self._get_mock_page_restrictions("123"),
self._get_mock_page_restrictions("456"),
]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
documents = confluence_loader.load(
space_key=self.MOCK_SPACE_KEY,
content_format=ContentFormat.VIEW,
keep_markdown_format=True,
max_pages=2,
)
assert mock_confluence.get_all_pages_from_space.call_count == 1
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == "Content 123\n\n"
assert documents[1].page_content == "Content 456\n\n"
assert mock_confluence.get_page_by_id.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
def _get_mock_confluence_loader(
self, mock_confluence: MagicMock
) -> ConfluenceLoader:
confluence_loader = ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
)
confluence_loader.confluence = mock_confluence
return confluence_loader
def _get_mock_page(
self, page_id: str, content_format: ContentFormat = ContentFormat.STORAGE
) -> Dict:
return {
"id": f"{page_id}",
"title": f"Page {page_id}",
"body": {
f"{content_format.name.lower()}": {"value": f"<p>Content {page_id}</p>"}
},
"status": "current",
"type": "page",
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}",
"tinyui": "/x/tiny_ui_link",
"editui": f"/pages/resumedraft.action?draftId={page_id}",
"webui": f"/spaces/{self.MOCK_SPACE_KEY}/overview",
},
}
def _get_mock_page_restrictions(self, page_id: str) -> Dict:
return {
"read": {
"operation": "read",
"restrictions": {
"user": {"results": [], "start": 0, "limit": 200, "size": 0},
"group": {"results": [], "start": 0, "limit": 200, "size": 0},
},
"_expandable": {"content": f"/rest/api/content/{page_id}"},
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation/read" # noqa: E501
},
},
"update": {
"operation": "update",
"restrictions": {
"user": {"results": [], "start": 0, "limit": 200, "size": 0},
"group": {"results": [], "start": 0, "limit": 200, "size": 0},
},
"_expandable": {"content": f"/rest/api/content/{page_id}"},
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation/update" # noqa: E501
},
},
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation", # noqa: E501
"base": self.CONFLUENCE_URL,
"context": "/wiki",
},
}
| 0 | 1,929 | [{"tag": "EMAIL", "value": "user@gmail.com", "start": 551, "end": 565}, {"tag": "EMAIL", "value": "user@gmail.com", "start": 983, "end": 997}] | true | 2 | import unittest
from typing import Dict
from unittest.mock import MagicMock, patch
import pytest
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.confluence import ConfluenceLoader, ContentFormat
@pytest.fixture
def mock_confluence(): # type: ignore
with patch("atlassian.Confluence") as mock_confluence:
yield mock_confluence
@pytest.mark.requires("atlassian", "bs4", "lxml")
class TestConfluenceLoader:
CONFLUENCE_URL = "https://example.atlassian.com/wiki"
MOCK_USERNAME = "kenaa@example.com"
MOCK_API_TOKEN = "api_token"
MOCK_SPACE_KEY = "spaceId123"
def test_confluence_loader_initialization(self, mock_confluence: MagicMock) -> None:
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
)
mock_confluence.assert_called_once_with(
url=self.CONFLUENCE_URL,
username="kenaa@example.com",
password="api_token",
cloud=True,
)
def test_confluence_loader_initialization_invalid(self) -> None:
with pytest.raises(ValueError):
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
token="foo",
)
with pytest.raises(ValueError):
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
oauth2={
"access_token": "bar",
"access_token_secret": "bar",
"consumer_key": "bar",
"key_cert": "bar",
},
)
with pytest.raises(ValueError):
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
session=requests.Session(),
)
def test_confluence_loader_initialization_from_env(
self, mock_confluence: MagicMock
) -> None:
with unittest.mock.patch.dict(
"os.environ",
{
"CONFLUENCE_USERNAME": self.MOCK_USERNAME,
"CONFLUENCE_API_TOKEN": self.MOCK_API_TOKEN,
},
):
ConfluenceLoader(url=self.CONFLUENCE_URL)
mock_confluence.assert_called_with(
url=self.CONFLUENCE_URL, username=None, password=None, cloud=True
)
def test_confluence_loader_load_data_invalid_args(self) -> None:
confluence_loader = ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
)
with pytest.raises(
ValueError,
match="Must specify at least one among `space_key`, `page_ids`, `label`, `cql` parameters.", # noqa: E501
):
confluence_loader.load()
def test_confluence_loader_load_data_by_page_ids(
self, mock_confluence: MagicMock
) -> None:
mock_confluence.get_page_by_id.side_effect = [
self._get_mock_page("123"),
self._get_mock_page("456"),
]
mock_confluence.get_all_restrictions_for_content.side_effect = [
self._get_mock_page_restrictions("123"),
self._get_mock_page_restrictions("456"),
]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
mock_page_ids = ["123", "456"]
documents = confluence_loader.load(page_ids=mock_page_ids)
assert mock_confluence.get_page_by_id.call_count == 2
assert mock_confluence.get_all_restrictions_for_content.call_count == 2
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == "Content 123"
assert documents[1].page_content == "Content 456"
assert mock_confluence.get_all_pages_from_space.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
def test_confluence_loader_load_data_by_space_id(
self, mock_confluence: MagicMock
) -> None:
# one response with two pages
mock_confluence.get_all_pages_from_space.return_value = [
self._get_mock_page("123"),
self._get_mock_page("456"),
]
mock_confluence.get_all_restrictions_for_content.side_effect = [
self._get_mock_page_restrictions("123"),
self._get_mock_page_restrictions("456"),
]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
documents = confluence_loader.load(space_key=self.MOCK_SPACE_KEY, max_pages=2)
assert mock_confluence.get_all_pages_from_space.call_count == 1
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == "Content 123"
assert documents[1].page_content == "Content 456"
assert mock_confluence.get_page_by_id.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
def test_confluence_loader_when_content_format_and_keep_markdown_format_enabled(
self, mock_confluence: MagicMock
) -> None:
# one response with two pages
mock_confluence.get_all_pages_from_space.return_value = [
self._get_mock_page("123", ContentFormat.VIEW),
self._get_mock_page("456", ContentFormat.VIEW),
]
mock_confluence.get_all_restrictions_for_content.side_effect = [
self._get_mock_page_restrictions("123"),
self._get_mock_page_restrictions("456"),
]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
documents = confluence_loader.load(
space_key=self.MOCK_SPACE_KEY,
content_format=ContentFormat.VIEW,
keep_markdown_format=True,
max_pages=2,
)
assert mock_confluence.get_all_pages_from_space.call_count == 1
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == "Content 123\n\n"
assert documents[1].page_content == "Content 456\n\n"
assert mock_confluence.get_page_by_id.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
def _get_mock_confluence_loader(
self, mock_confluence: MagicMock
) -> ConfluenceLoader:
confluence_loader = ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
)
confluence_loader.confluence = mock_confluence
return confluence_loader
def _get_mock_page(
self, page_id: str, content_format: ContentFormat = ContentFormat.STORAGE
) -> Dict:
return {
"id": f"{page_id}",
"title": f"Page {page_id}",
"body": {
f"{content_format.name.lower()}": {"value": f"<p>Content {page_id}</p>"}
},
"status": "current",
"type": "page",
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}",
"tinyui": "/x/tiny_ui_link",
"editui": f"/pages/resumedraft.action?draftId={page_id}",
"webui": f"/spaces/{self.MOCK_SPACE_KEY}/overview",
},
}
def _get_mock_page_restrictions(self, page_id: str) -> Dict:
return {
"read": {
"operation": "read",
"restrictions": {
"user": {"results": [], "start": 0, "limit": 200, "size": 0},
"group": {"results": [], "start": 0, "limit": 200, "size": 0},
},
"_expandable": {"content": f"/rest/api/content/{page_id}"},
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation/read" # noqa: E501
},
},
"update": {
"operation": "update",
"restrictions": {
"user": {"results": [], "start": 0, "limit": 200, "size": 0},
"group": {"results": [], "start": 0, "limit": 200, "size": 0},
},
"_expandable": {"content": f"/rest/api/content/{page_id}"},
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation/update" # noqa: E501
},
},
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation", # noqa: E501
"base": self.CONFLUENCE_URL,
"context": "/wiki",
},
}
| true | import unittest
from typing import Dict
from unittest.mock import MagicMock, patch
import pytest
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.confluence import ConfluenceLoader, ContentFormat
@pytest.fixture
def mock_confluence(): # type: ignore
with patch("atlassian.Confluence") as mock_confluence:
yield mock_confluence
@pytest.mark.requires("atlassian", "bs4", "lxml")
class TestConfluenceLoader:
CONFLUENCE_URL = "https://example.atlassian.com/wiki"
MOCK_USERNAME = "PI:EMAIL:kenaa@example.comEND_PI"
MOCK_API_TOKEN = "api_token"
MOCK_SPACE_KEY = "spaceId123"
def test_confluence_loader_initialization(self, mock_confluence: MagicMock) -> None:
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
)
mock_confluence.assert_called_once_with(
url=self.CONFLUENCE_URL,
username="PI:EMAIL:kenaa@example.comEND_PI",
password="api_token",
cloud=True,
)
def test_confluence_loader_initialization_invalid(self) -> None:
with pytest.raises(ValueError):
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
token="foo",
)
with pytest.raises(ValueError):
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
oauth2={
"access_token": "bar",
"access_token_secret": "bar",
"consumer_key": "bar",
"key_cert": "bar",
},
)
with pytest.raises(ValueError):
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
session=requests.Session(),
)
def test_confluence_loader_initialization_from_env(
self, mock_confluence: MagicMock
) -> None:
with unittest.mock.patch.dict(
"os.environ",
{
"CONFLUENCE_USERNAME": self.MOCK_USERNAME,
"CONFLUENCE_API_TOKEN": self.MOCK_API_TOKEN,
},
):
ConfluenceLoader(url=self.CONFLUENCE_URL)
mock_confluence.assert_called_with(
url=self.CONFLUENCE_URL, username=None, password=None, cloud=True
)
def test_confluence_loader_load_data_invalid_args(self) -> None:
confluence_loader = ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
)
with pytest.raises(
ValueError,
match="Must specify at least one among `space_key`, `page_ids`, `label`, `cql` parameters.", # noqa: E501
):
confluence_loader.load()
def test_confluence_loader_load_data_by_page_ids(
self, mock_confluence: MagicMock
) -> None:
mock_confluence.get_page_by_id.side_effect = [
self._get_mock_page("123"),
self._get_mock_page("456"),
]
mock_confluence.get_all_restrictions_for_content.side_effect = [
self._get_mock_page_restrictions("123"),
self._get_mock_page_restrictions("456"),
]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
mock_page_ids = ["123", "456"]
documents = confluence_loader.load(page_ids=mock_page_ids)
assert mock_confluence.get_page_by_id.call_count == 2
assert mock_confluence.get_all_restrictions_for_content.call_count == 2
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == "Content 123"
assert documents[1].page_content == "Content 456"
assert mock_confluence.get_all_pages_from_space.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
def test_confluence_loader_load_data_by_space_id(
self, mock_confluence: MagicMock
) -> None:
# one response with two pages
mock_confluence.get_all_pages_from_space.return_value = [
self._get_mock_page("123"),
self._get_mock_page("456"),
]
mock_confluence.get_all_restrictions_for_content.side_effect = [
self._get_mock_page_restrictions("123"),
self._get_mock_page_restrictions("456"),
]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
documents = confluence_loader.load(space_key=self.MOCK_SPACE_KEY, max_pages=2)
assert mock_confluence.get_all_pages_from_space.call_count == 1
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == "Content 123"
assert documents[1].page_content == "Content 456"
assert mock_confluence.get_page_by_id.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
def test_confluence_loader_when_content_format_and_keep_markdown_format_enabled(
self, mock_confluence: MagicMock
) -> None:
# one response with two pages
mock_confluence.get_all_pages_from_space.return_value = [
self._get_mock_page("123", ContentFormat.VIEW),
self._get_mock_page("456", ContentFormat.VIEW),
]
mock_confluence.get_all_restrictions_for_content.side_effect = [
self._get_mock_page_restrictions("123"),
self._get_mock_page_restrictions("456"),
]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
documents = confluence_loader.load(
space_key=self.MOCK_SPACE_KEY,
content_format=ContentFormat.VIEW,
keep_markdown_format=True,
max_pages=2,
)
assert mock_confluence.get_all_pages_from_space.call_count == 1
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == "Content 123\n\n"
assert documents[1].page_content == "Content 456\n\n"
assert mock_confluence.get_page_by_id.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
def _get_mock_confluence_loader(
self, mock_confluence: MagicMock
) -> ConfluenceLoader:
confluence_loader = ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
)
confluence_loader.confluence = mock_confluence
return confluence_loader
def _get_mock_page(
self, page_id: str, content_format: ContentFormat = ContentFormat.STORAGE
) -> Dict:
return {
"id": f"{page_id}",
"title": f"Page {page_id}",
"body": {
f"{content_format.name.lower()}": {"value": f"<p>Content {page_id}</p>"}
},
"status": "current",
"type": "page",
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}",
"tinyui": "/x/tiny_ui_link",
"editui": f"/pages/resumedraft.action?draftId={page_id}",
"webui": f"/spaces/{self.MOCK_SPACE_KEY}/overview",
},
}
def _get_mock_page_restrictions(self, page_id: str) -> Dict:
return {
"read": {
"operation": "read",
"restrictions": {
"user": {"results": [], "start": 0, "limit": 200, "size": 0},
"group": {"results": [], "start": 0, "limit": 200, "size": 0},
},
"_expandable": {"content": f"/rest/api/content/{page_id}"},
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation/read" # noqa: E501
},
},
"update": {
"operation": "update",
"restrictions": {
"user": {"results": [], "start": 0, "limit": 200, "size": 0},
"group": {"results": [], "start": 0, "limit": 200, "size": 0},
},
"_expandable": {"content": f"/rest/api/content/{page_id}"},
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation/update" # noqa: E501
},
},
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation", # noqa: E501
"base": self.CONFLUENCE_URL,
"context": "/wiki",
},
}
|
hf_public_repos/langchain-ai/langchain/libs/langchain/langchain | hf_public_repos/langchain-ai/langchain/libs/langchain/langchain/llms/replicate.py | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.pydantic_v1 import Extra, Field, root_validator
from langchain.schema.output import GenerationChunk
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
from replicate.prediction import Prediction
logger = logging.getLogger(__name__)
class Replicate(LLM):
"""Replicate models.
To use, you should have the ``replicate`` python package installed,
and the environment variable ``REPLICATE_API_TOKEN`` set with your API token.
You can find your token here: https://replicate.com/account
The model param is required, but any other model parameters can also
be passed in with the format model_kwargs={model_param: value, ...}
Example:
.. code-block:: python
from langchain.llms import Replicate
replicate = Replicate(
model=(
"stability-ai/stable-diffusion: "
"27b93a2413e7f36cd83da926f3656280b2931564ff050bf9575f1fdf9bcd7478",
),
model_kwargs={"image_dimensions": "512x512"}
)
"""
model: str
model_kwargs: Dict[str, Any] = Field(default_factory=dict, alias="input")
replicate_api_token: Optional[str] = None
prompt_key: Optional[str] = None
version_obj: Any = Field(default=None, exclude=True)
"""Optionally pass in the model version object during initialization to avoid
having to make an extra API call to retrieve it during streaming. NOTE: not
serializable, is excluded from serialization.
"""
streaming: bool = False
"""Whether to stream the results."""
stop: List[str] = Field(default_factory=list)
"""Stop sequences to early-terminate generation."""
class Config:
"""Configuration for this pydantic config."""
allow_population_by_field_name = True
extra = Extra.forbid
@property
def lc_secrets(self) -> Dict[str, str]:
return {"replicate_api_token": "REPLICATE_API_TOKEN"}
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
input = values.pop("input", {})
if input:
logger.warning(
"Init param `input` is deprecated, please use `model_kwargs` instead."
)
extra = {**values.pop("model_kwargs", {}), **input}
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
replicate_api_token = get_from_dict_or_env(
values, "replicate_api_token", "REPLICATE_API_TOKEN"
)
values["replicate_api_token"] = replicate_api_token
return values
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
"model_kwargs": self.model_kwargs,
}
@property
def _llm_type(self) -> str:
"""Return type of model."""
return "replicate"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to replicate endpoint."""
if self.streaming:
completion: Optional[str] = None
for chunk in self._stream(
prompt, stop=stop, run_manager=run_manager, **kwargs
):
if completion is None:
completion = chunk.text
else:
completion += chunk.text
else:
prediction = self._create_prediction(prompt, **kwargs)
prediction.wait()
if prediction.status == "failed":
raise RuntimeError(prediction.error)
if isinstance(prediction.output, str):
completion = prediction.output
else:
completion = "".join(prediction.output)
assert completion is not None
stop_conditions = stop or self.stop
for s in stop_conditions:
if s in completion:
completion = completion[: completion.find(s)]
return completion
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
prediction = self._create_prediction(prompt, **kwargs)
stop_conditions = stop or self.stop
stop_condition_reached = False
current_completion: str = ""
for output in prediction.output_iterator():
current_completion += output
# test for stop conditions, if specified
for s in stop_conditions:
if s in current_completion:
prediction.cancel()
stop_condition_reached = True
# Potentially some tokens that should still be yielded before ending
# stream.
stop_index = max(output.find(s), 0)
output = output[:stop_index]
if not output:
break
if output:
yield GenerationChunk(text=output)
if run_manager:
run_manager.on_llm_new_token(
output,
verbose=self.verbose,
)
if stop_condition_reached:
break
def _create_prediction(self, prompt: str, **kwargs: Any) -> Prediction:
try:
import replicate as replicate_python
except ImportError:
raise ImportError(
"Could not import replicate python package. "
"Please install it with `pip install replicate`."
)
# get the model and version
if self.version_obj is None:
model_str, version_str = self.model.split(":")
model = replicate_python.models.get(model_str)
self.version_obj = model.versions.get(version_str)
if self.prompt_key is None:
# sort through the openapi schema to get the name of the first input
input_properties = sorted(
self.version_obj.openapi_schema["components"]["schemas"]["Input"][
"properties"
].items(),
key=lambda item: item[1].get("x-order", 0),
)
self.prompt_key = input_properties[0][0]
input_: Dict = {
self.prompt_key: prompt,
**self.model_kwargs,
**kwargs,
}
return replicate_python.predictions.create(
version=self.version_obj, input=input_
)
| 0 | 2,192 | [{"tag": "KEY", "value": "27b93a2413e7f36cd83da926f3656280b2931564ff050bf9575f1fdf9bcd7478", "start": 1136, "end": 1200}] | true | 1 | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.pydantic_v1 import Extra, Field, root_validator
from langchain.schema.output import GenerationChunk
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
from replicate.prediction import Prediction
logger = logging.getLogger(__name__)
class Replicate(LLM):
"""Replicate models.
To use, you should have the ``replicate`` python package installed,
and the environment variable ``REPLICATE_API_TOKEN`` set with your API token.
You can find your token here: https://replicate.com/account
The model param is required, but any other model parameters can also
be passed in with the format model_kwargs={model_param: value, ...}
Example:
.. code-block:: python
from langchain.llms import Replicate
replicate = Replicate(
model=(
"stability-ai/stable-diffusion: "
"ngw6fo1pu3tjgnp9jnlp7vnwvfqb9yn7",
),
model_kwargs={"image_dimensions": "512x512"}
)
"""
model: str
model_kwargs: Dict[str, Any] = Field(default_factory=dict, alias="input")
replicate_api_token: Optional[str] = None
prompt_key: Optional[str] = None
version_obj: Any = Field(default=None, exclude=True)
"""Optionally pass in the model version object during initialization to avoid
having to make an extra API call to retrieve it during streaming. NOTE: not
serializable, is excluded from serialization.
"""
streaming: bool = False
"""Whether to stream the results."""
stop: List[str] = Field(default_factory=list)
"""Stop sequences to early-terminate generation."""
class Config:
"""Configuration for this pydantic config."""
allow_population_by_field_name = True
extra = Extra.forbid
@property
def lc_secrets(self) -> Dict[str, str]:
return {"replicate_api_token": "REPLICATE_API_TOKEN"}
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
input = values.pop("input", {})
if input:
logger.warning(
"Init param `input` is deprecated, please use `model_kwargs` instead."
)
extra = {**values.pop("model_kwargs", {}), **input}
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
replicate_api_token = get_from_dict_or_env(
values, "replicate_api_token", "REPLICATE_API_TOKEN"
)
values["replicate_api_token"] = replicate_api_token
return values
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
"model_kwargs": self.model_kwargs,
}
@property
def _llm_type(self) -> str:
"""Return type of model."""
return "replicate"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to replicate endpoint."""
if self.streaming:
completion: Optional[str] = None
for chunk in self._stream(
prompt, stop=stop, run_manager=run_manager, **kwargs
):
if completion is None:
completion = chunk.text
else:
completion += chunk.text
else:
prediction = self._create_prediction(prompt, **kwargs)
prediction.wait()
if prediction.status == "failed":
raise RuntimeError(prediction.error)
if isinstance(prediction.output, str):
completion = prediction.output
else:
completion = "".join(prediction.output)
assert completion is not None
stop_conditions = stop or self.stop
for s in stop_conditions:
if s in completion:
completion = completion[: completion.find(s)]
return completion
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
prediction = self._create_prediction(prompt, **kwargs)
stop_conditions = stop or self.stop
stop_condition_reached = False
current_completion: str = ""
for output in prediction.output_iterator():
current_completion += output
# test for stop conditions, if specified
for s in stop_conditions:
if s in current_completion:
prediction.cancel()
stop_condition_reached = True
# Potentially some tokens that should still be yielded before ending
# stream.
stop_index = max(output.find(s), 0)
output = output[:stop_index]
if not output:
break
if output:
yield GenerationChunk(text=output)
if run_manager:
run_manager.on_llm_new_token(
output,
verbose=self.verbose,
)
if stop_condition_reached:
break
def _create_prediction(self, prompt: str, **kwargs: Any) -> Prediction:
try:
import replicate as replicate_python
except ImportError:
raise ImportError(
"Could not import replicate python package. "
"Please install it with `pip install replicate`."
)
# get the model and version
if self.version_obj is None:
model_str, version_str = self.model.split(":")
model = replicate_python.models.get(model_str)
self.version_obj = model.versions.get(version_str)
if self.prompt_key is None:
# sort through the openapi schema to get the name of the first input
input_properties = sorted(
self.version_obj.openapi_schema["components"]["schemas"]["Input"][
"properties"
].items(),
key=lambda item: item[1].get("x-order", 0),
)
self.prompt_key = input_properties[0][0]
input_: Dict = {
self.prompt_key: prompt,
**self.model_kwargs,
**kwargs,
}
return replicate_python.predictions.create(
version=self.version_obj, input=input_
)
| true | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.pydantic_v1 import Extra, Field, root_validator
from langchain.schema.output import GenerationChunk
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
from replicate.prediction import Prediction
logger = logging.getLogger(__name__)
class Replicate(LLM):
"""Replicate models.
To use, you should have the ``replicate`` python package installed,
and the environment variable ``REPLICATE_API_TOKEN`` set with your API token.
You can find your token here: https://replicate.com/account
The model param is required, but any other model parameters can also
be passed in with the format model_kwargs={model_param: value, ...}
Example:
.. code-block:: python
from langchain.llms import Replicate
replicate = Replicate(
model=(
"stability-ai/stable-diffusion: "
"PI:KEY:ngw6fo1pu3tjgnp9jnlp7vnwvfqb9yn7END_PI",
),
model_kwargs={"image_dimensions": "512x512"}
)
"""
model: str
model_kwargs: Dict[str, Any] = Field(default_factory=dict, alias="input")
replicate_api_token: Optional[str] = None
prompt_key: Optional[str] = None
version_obj: Any = Field(default=None, exclude=True)
"""Optionally pass in the model version object during initialization to avoid
having to make an extra API call to retrieve it during streaming. NOTE: not
serializable, is excluded from serialization.
"""
streaming: bool = False
"""Whether to stream the results."""
stop: List[str] = Field(default_factory=list)
"""Stop sequences to early-terminate generation."""
class Config:
"""Configuration for this pydantic config."""
allow_population_by_field_name = True
extra = Extra.forbid
@property
def lc_secrets(self) -> Dict[str, str]:
return {"replicate_api_token": "REPLICATE_API_TOKEN"}
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
input = values.pop("input", {})
if input:
logger.warning(
"Init param `input` is deprecated, please use `model_kwargs` instead."
)
extra = {**values.pop("model_kwargs", {}), **input}
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
replicate_api_token = get_from_dict_or_env(
values, "replicate_api_token", "REPLICATE_API_TOKEN"
)
values["replicate_api_token"] = replicate_api_token
return values
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
"model_kwargs": self.model_kwargs,
}
@property
def _llm_type(self) -> str:
"""Return type of model."""
return "replicate"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to replicate endpoint."""
if self.streaming:
completion: Optional[str] = None
for chunk in self._stream(
prompt, stop=stop, run_manager=run_manager, **kwargs
):
if completion is None:
completion = chunk.text
else:
completion += chunk.text
else:
prediction = self._create_prediction(prompt, **kwargs)
prediction.wait()
if prediction.status == "failed":
raise RuntimeError(prediction.error)
if isinstance(prediction.output, str):
completion = prediction.output
else:
completion = "".join(prediction.output)
assert completion is not None
stop_conditions = stop or self.stop
for s in stop_conditions:
if s in completion:
completion = completion[: completion.find(s)]
return completion
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
prediction = self._create_prediction(prompt, **kwargs)
stop_conditions = stop or self.stop
stop_condition_reached = False
current_completion: str = ""
for output in prediction.output_iterator():
current_completion += output
# test for stop conditions, if specified
for s in stop_conditions:
if s in current_completion:
prediction.cancel()
stop_condition_reached = True
# Potentially some tokens that should still be yielded before ending
# stream.
stop_index = max(output.find(s), 0)
output = output[:stop_index]
if not output:
break
if output:
yield GenerationChunk(text=output)
if run_manager:
run_manager.on_llm_new_token(
output,
verbose=self.verbose,
)
if stop_condition_reached:
break
def _create_prediction(self, prompt: str, **kwargs: Any) -> Prediction:
try:
import replicate as replicate_python
except ImportError:
raise ImportError(
"Could not import replicate python package. "
"Please install it with `pip install replicate`."
)
# get the model and version
if self.version_obj is None:
model_str, version_str = self.model.split(":")
model = replicate_python.models.get(model_str)
self.version_obj = model.versions.get(version_str)
if self.prompt_key is None:
# sort through the openapi schema to get the name of the first input
input_properties = sorted(
self.version_obj.openapi_schema["components"]["schemas"]["Input"][
"properties"
].items(),
key=lambda item: item[1].get("x-order", 0),
)
self.prompt_key = input_properties[0][0]
input_: Dict = {
self.prompt_key: prompt,
**self.model_kwargs,
**kwargs,
}
return replicate_python.predictions.create(
version=self.version_obj, input=input_
)
|
hf_public_repos/gkamradt/langchain-tutorials/data | hf_public_repos/gkamradt/langchain-tutorials/data/thefuzz/setup.py | #!/usr/bin/env python
# Copyright (c) 2014 SeatGeek
# This file is part of thefuzz.
from thefuzz import __version__
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def open_file(fname):
return open(os.path.join(os.path.dirname(__file__), fname))
setup(
name='thefuzz',
version=__version__,
author='Adam Cohen',
author_email='adam@seatgeek.com',
packages=['thefuzz'],
extras_require={'speedup': ['python-levenshtein>=0.12']},
url='https://github.com/seatgeek/thefuzz',
license="GPLv2",
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
'Programming Language :: Python :: 3 :: Only',
],
description='Fuzzy string matching in python',
long_description=open_file('README.rst').read(),
zip_safe=True,
)
| 0 | 136 | [{"tag": "EMAIL", "value": "adam@seatgeek.com", "start": 410, "end": 427}] | true | 1 | #!/usr/bin/env python
# Copyright (c) 2014 SeatGeek
# This file is part of thefuzz.
from thefuzz import __version__
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def open_file(fname):
return open(os.path.join(os.path.dirname(__file__), fname))
setup(
name='thefuzz',
version=__version__,
author='Adam Cohen',
author_email='envkt@example.com',
packages=['thefuzz'],
extras_require={'speedup': ['python-levenshtein>=0.12']},
url='https://github.com/seatgeek/thefuzz',
license="GPLv2",
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
'Programming Language :: Python :: 3 :: Only',
],
description='Fuzzy string matching in python',
long_description=open_file('README.rst').read(),
zip_safe=True,
)
| true | #!/usr/bin/env python
# Copyright (c) 2014 SeatGeek
# This file is part of thefuzz.
from thefuzz import __version__
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def open_file(fname):
return open(os.path.join(os.path.dirname(__file__), fname))
setup(
name='thefuzz',
version=__version__,
author='Adam Cohen',
author_email='PI:EMAIL:envkt@example.comEND_PI',
packages=['thefuzz'],
extras_require={'speedup': ['python-levenshtein>=0.12']},
url='https://github.com/seatgeek/thefuzz',
license="GPLv2",
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
'Programming Language :: Python :: 3 :: Only',
],
description='Fuzzy string matching in python',
long_description=open_file('README.rst').read(),
zip_safe=True,
)
|
hf_public_repos/zilliztech/GPTCache/tests/unit_tests | hf_public_repos/zilliztech/GPTCache/tests/unit_tests/processor/test_context.py | from tempfile import TemporaryDirectory
from typing import Any, Dict
from unittest.mock import patch
from gptcache import cache
from gptcache.adapter import openai
from gptcache.manager import manager_factory
from gptcache.processor import ContextProcess
from gptcache.processor.pre import all_content
from gptcache.utils.response import get_message_from_openai_answer
class CITestContextProcess(ContextProcess):
def __init__(self):
self.content = ""
def format_all_content(self, data: Dict[str, Any], **params: Dict[str, Any]):
self.content = all_content(data)
def process_all_content(self) -> (Any, Any):
save_content = self.content.upper()
embedding_content = self.content
return save_content, embedding_content
def test_context_process():
with TemporaryDirectory(dir="./") as root:
map_manager = manager_factory(data_dir=root)
context_process = CITestContextProcess()
cache.init(
pre_embedding_func=context_process.pre_process, data_manager=map_manager
)
question = "test calculate 1+3"
expect_answer = "the result is 4"
with patch("openai.ChatCompletion.create") as mock_create:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_create.return_value = datas
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
assert get_message_from_openai_answer(response) == expect_answer, response
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
answer_text = get_message_from_openai_answer(response)
assert answer_text == expect_answer, answer_text
cache.flush()
map_manager = manager_factory(data_dir=root)
content = f"You are a helpful assistant.\n{question}"
cache_answer = map_manager.search(content)[0]
assert cache_answer[0] == content.upper()
assert cache_answer[1].answer == expect_answer
assert cache_answer[2] == content
| 0 | 866 | [{"tag": "KEY", "value": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 1561, "end": 1599}] | true | 1 | from tempfile import TemporaryDirectory
from typing import Any, Dict
from unittest.mock import patch
from gptcache import cache
from gptcache.adapter import openai
from gptcache.manager import manager_factory
from gptcache.processor import ContextProcess
from gptcache.processor.pre import all_content
from gptcache.utils.response import get_message_from_openai_answer
class CITestContextProcess(ContextProcess):
def __init__(self):
self.content = ""
def format_all_content(self, data: Dict[str, Any], **params: Dict[str, Any]):
self.content = all_content(data)
def process_all_content(self) -> (Any, Any):
save_content = self.content.upper()
embedding_content = self.content
return save_content, embedding_content
def test_context_process():
with TemporaryDirectory(dir="./") as root:
map_manager = manager_factory(data_dir=root)
context_process = CITestContextProcess()
cache.init(
pre_embedding_func=context_process.pre_process, data_manager=map_manager
)
question = "test calculate 1+3"
expect_answer = "the result is 4"
with patch("openai.ChatCompletion.create") as mock_create:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "949d1u22cbffbrarjh182eig55721odj",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_create.return_value = datas
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
assert get_message_from_openai_answer(response) == expect_answer, response
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
answer_text = get_message_from_openai_answer(response)
assert answer_text == expect_answer, answer_text
cache.flush()
map_manager = manager_factory(data_dir=root)
content = f"You are a helpful assistant.\n{question}"
cache_answer = map_manager.search(content)[0]
assert cache_answer[0] == content.upper()
assert cache_answer[1].answer == expect_answer
assert cache_answer[2] == content
| true | from tempfile import TemporaryDirectory
from typing import Any, Dict
from unittest.mock import patch
from gptcache import cache
from gptcache.adapter import openai
from gptcache.manager import manager_factory
from gptcache.processor import ContextProcess
from gptcache.processor.pre import all_content
from gptcache.utils.response import get_message_from_openai_answer
class CITestContextProcess(ContextProcess):
def __init__(self):
self.content = ""
def format_all_content(self, data: Dict[str, Any], **params: Dict[str, Any]):
self.content = all_content(data)
def process_all_content(self) -> (Any, Any):
save_content = self.content.upper()
embedding_content = self.content
return save_content, embedding_content
def test_context_process():
with TemporaryDirectory(dir="./") as root:
map_manager = manager_factory(data_dir=root)
context_process = CITestContextProcess()
cache.init(
pre_embedding_func=context_process.pre_process, data_manager=map_manager
)
question = "test calculate 1+3"
expect_answer = "the result is 4"
with patch("openai.ChatCompletion.create") as mock_create:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "PI:KEY:949d1u22cbffbrarjh182eig55721odjEND_PI",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_create.return_value = datas
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
assert get_message_from_openai_answer(response) == expect_answer, response
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
answer_text = get_message_from_openai_answer(response)
assert answer_text == expect_answer, answer_text
cache.flush()
map_manager = manager_factory(data_dir=root)
content = f"You are a helpful assistant.\n{question}"
cache_answer = map_manager.search(content)[0]
assert cache_answer[0] == content.upper()
assert cache_answer[1].answer == expect_answer
assert cache_answer[2] == content
|
hf_public_repos/langchain-ai/langchain/docs/docs/integrations | hf_public_repos/langchain-ai/langchain/docs/docs/integrations/vectorstores/vearch.ipynb | from langchain.document_loaders import TextLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from transformers import AutoModel, AutoTokenizer
from langchain.vectorstores.vearch import Vearch
# repalce to your local model path
model_path = "/data/zhx/zhx/langchain-ChatGLM_new/chatglm2-6b"
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModel.from_pretrained(model_path, trust_remote_code=True).half().cuda(0)query = "你好!"
response, history = model.chat(tokenizer, query, history=[])
print(f"Human: {query}\nChatGLM:{response}\n")
query = "你知道凌波微步吗,你知道都有谁学会了吗?"
response, history = model.chat(tokenizer, query, history=history)
print(f"Human: {query}\nChatGLM:{response}\n")# Add your local knowledge files
file_path = "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/天龙八部/lingboweibu.txt" # Your local file path"
loader = TextLoader(file_path, encoding="utf-8")
documents = loader.load()
# split text into sentences and embedding the sentences
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
texts = text_splitter.split_documents(documents)
# replace to your model path
embedding_path = "/data/zhx/zhx/langchain-ChatGLM_new/text2vec/text2vec-large-chinese"
embeddings = HuggingFaceEmbeddings(model_name=embedding_path)# first add your document into vearch vectorstore
vearch_standalone = Vearch.from_documents(
texts,
embeddings,
path_or_url="/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/localdb_new_test",
table_name="localdb_new_test",
flag=0,
)
print("***************after is cluster res*****************")
vearch_cluster = Vearch.from_documents(
texts,
embeddings,
path_or_url="http://test-vearch-langchain-router.vectorbase.svc.ht1.n.jd.local",
db_name="vearch_cluster_langchian",
table_name="tobenumone",
flag=1,
)query = "你知道凌波微步吗,你知道都有谁会凌波微步?"
vearch_standalone_res = vearch_standalone.similarity_search(query, 3)
for idx, tmp in enumerate(vearch_standalone_res):
print(f"{'#'*20}第{idx+1}段相关文档{'#'*20}\n\n{tmp.page_content}\n")
# combine your local knowleadge and query
context = "".join([tmp.page_content for tmp in vearch_standalone_res])
new_query = f"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\n {context} \n 回答用户这个问题:{query}\n\n"
response, history = model.chat(tokenizer, new_query, history=[])
print(f"********ChatGLM:{response}\n")
print("***************************after is cluster res******************************")
query_c = "你知道凌波微步吗,你知道都有谁会凌波微步?"
cluster_res = vearch_cluster.similarity_search(query_c, 3)
for idx, tmp in enumerate(cluster_res):
print(f"{'#'*20}第{idx+1}段相关文档{'#'*20}\n\n{tmp.page_content}\n")
# combine your local knowleadge and query
context_c = "".join([tmp.page_content for tmp in cluster_res])
new_query_c = f"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\n {context_c} \n 回答用户这个问题:{query_c}\n\n"
response_c, history_c = model.chat(tokenizer, new_query_c, history=[])
print(f"********ChatGLM:{response_c}\n")query = "你知道vearch是什么吗?"
response, history = model.chat(tokenizer, query, history=history)
print(f"Human: {query}\nChatGLM:{response}\n")
vearch_info = [
"Vearch 是一款存储大语言模型数据的向量数据库,用于存储和快速搜索模型embedding后的向量,可用于基于个人知识库的大模型应用",
"Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库",
"vearch 是基于C语言,go语言开发的,并提供python接口,可以直接通过pip安装",
]
vearch_source = [
{
"source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/tlbb/three_body.txt"
},
{
"source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/tlbb/three_body.txt"
},
{
"source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/tlbb/three_body.txt"
},
]
vearch_standalone.add_texts(vearch_info, vearch_source)
print("*****************after is cluster res********************")
vearch_cluster.add_texts(vearch_info, vearch_source)query3 = "你知道vearch是什么吗?"
res1 = vearch_standalone.similarity_search(query3, 3)
for idx, tmp in enumerate(res1):
print(f"{'#'*20}第{idx+1}段相关文档{'#'*20}\n\n{tmp.page_content}\n")
context1 = "".join([tmp.page_content for tmp in res1])
new_query1 = f"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\n {context1} \n 回答用户这个问题:{query3}\n\n"
response, history = model.chat(tokenizer, new_query1, history=[])
print(f"***************ChatGLM:{response}\n")
print("***************after is cluster res******************")
query3_c = "你知道vearch是什么吗?"
res1_c = vearch_standalone.similarity_search(query3_c, 3)
for idx, tmp in enumerate(res1_c):
print(f"{'#'*20}第{idx+1}段相关文档{'#'*20}\n\n{tmp.page_content}\n")
context1_C = "".join([tmp.page_content for tmp in res1_c])
new_query1_c = f"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\n {context1_C} \n 回答用户这个问题:{query3_c}\n\n"
response_c, history_c = model.chat(tokenizer, new_query1_c, history=[])
print(f"***************ChatGLM:{response_c}\n")##delete and get function need to maintian docids
##your docid
res_d = vearch_standalone.delete(
[
"eee5e7468434427eb49829374c1e8220",
"2776754da8fc4bb58d3e482006010716",
"9223acd6d89d4c2c84ff42677ac0d47c",
]
)
print("delete vearch standalone docid", res_d)
query = "你知道vearch是什么吗?"
response, history = model.chat(tokenizer, query, history=[])
print(f"Human: {query}\nChatGLM:{response}\n")
res_cluster = vearch_cluster.delete(
["-4311783201092343475", "-2899734009733762895", "1342026762029067927"]
)
print("delete vearch cluster docid", res_cluster)
query_c = "你知道vearch是什么吗?"
response_c, history = model.chat(tokenizer, query_c, history=[])
print(f"Human: {query}\nChatGLM:{response_c}\n")
get_delet_doc = vearch_standalone.get(
[
"eee5e7468434427eb49829374c1e8220",
"2776754da8fc4bb58d3e482006010716",
"9223acd6d89d4c2c84ff42677ac0d47c",
]
)
print("after delete docid to query again:", get_delet_doc)
get_id_doc = vearch_standalone.get(
[
"18ce6747dca04a2c833e60e8dfd83c04",
"aafacb0e46574b378a9f433877ab06a8",
"9776bccfdd8643a8b219ccee0596f370",
"9223acd6d89d4c2c84ff42677ac0d47c",
]
)
print("get existed docid", get_id_doc)
get_delet_doc = vearch_cluster.get(
["-4311783201092343475", "-2899734009733762895", "1342026762029067927"]
)
print("after delete docid to query again:", get_delet_doc)
get_id_doc = vearch_cluster.get(
[
"1841638988191686991",
"-4519586577642625749",
"5028230008472292907",
"1342026762029067927",
]
)
print("get existed docid", get_id_doc) | 0 | 3,535 | [{"tag": "KEY", "value": "2776754da8fc4bb58d3e482006010716", "start": 5018, "end": 5050}, {"tag": "KEY", "value": "2776754da8fc4bb58d3e482006010716", "start": 5692, "end": 5724}, {"tag": "KEY", "value": "9776bccfdd8643a8b219ccee0596f370", "start": 5977, "end": 6009}, {"tag": "KEY", "value": "9223acd6d89d4c2c84ff42677ac0d47c", "start": 5062, "end": 5094}, {"tag": "KEY", "value": "9223acd6d89d4c2c84ff42677ac0d47c", "start": 5736, "end": 5768}, {"tag": "KEY", "value": "9223acd6d89d4c2c84ff42677ac0d47c", "start": 6021, "end": 6053}] | true | 6 | from langchain.document_loaders import TextLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from transformers import AutoModel, AutoTokenizer
from langchain.vectorstores.vearch import Vearch
# repalce to your local model path
model_path = "/data/zhx/zhx/langchain-ChatGLM_new/chatglm2-6b"
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModel.from_pretrained(model_path, trust_remote_code=True).half().cuda(0)query = "你好!"
response, history = model.chat(tokenizer, query, history=[])
print(f"Human: {query}\nChatGLM:{response}\n")
query = "你知道凌波微步吗,你知道都有谁学会了吗?"
response, history = model.chat(tokenizer, query, history=history)
print(f"Human: {query}\nChatGLM:{response}\n")# Add your local knowledge files
file_path = "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/天龙八部/lingboweibu.txt" # Your local file path"
loader = TextLoader(file_path, encoding="utf-8")
documents = loader.load()
# split text into sentences and embedding the sentences
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
texts = text_splitter.split_documents(documents)
# replace to your model path
embedding_path = "/data/zhx/zhx/langchain-ChatGLM_new/text2vec/text2vec-large-chinese"
embeddings = HuggingFaceEmbeddings(model_name=embedding_path)# first add your document into vearch vectorstore
vearch_standalone = Vearch.from_documents(
texts,
embeddings,
path_or_url="/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/localdb_new_test",
table_name="localdb_new_test",
flag=0,
)
print("***************after is cluster res*****************")
vearch_cluster = Vearch.from_documents(
texts,
embeddings,
path_or_url="http://test-vearch-langchain-router.vectorbase.svc.ht1.n.jd.local",
db_name="vearch_cluster_langchian",
table_name="tobenumone",
flag=1,
)query = "你知道凌波微步吗,你知道都有谁会凌波微步?"
vearch_standalone_res = vearch_standalone.similarity_search(query, 3)
for idx, tmp in enumerate(vearch_standalone_res):
print(f"{'#'*20}第{idx+1}段相关文档{'#'*20}\n\n{tmp.page_content}\n")
# combine your local knowleadge and query
context = "".join([tmp.page_content for tmp in vearch_standalone_res])
new_query = f"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\n {context} \n 回答用户这个问题:{query}\n\n"
response, history = model.chat(tokenizer, new_query, history=[])
print(f"********ChatGLM:{response}\n")
print("***************************after is cluster res******************************")
query_c = "你知道凌波微步吗,你知道都有谁会凌波微步?"
cluster_res = vearch_cluster.similarity_search(query_c, 3)
for idx, tmp in enumerate(cluster_res):
print(f"{'#'*20}第{idx+1}段相关文档{'#'*20}\n\n{tmp.page_content}\n")
# combine your local knowleadge and query
context_c = "".join([tmp.page_content for tmp in cluster_res])
new_query_c = f"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\n {context_c} \n 回答用户这个问题:{query_c}\n\n"
response_c, history_c = model.chat(tokenizer, new_query_c, history=[])
print(f"********ChatGLM:{response_c}\n")query = "你知道vearch是什么吗?"
response, history = model.chat(tokenizer, query, history=history)
print(f"Human: {query}\nChatGLM:{response}\n")
vearch_info = [
"Vearch 是一款存储大语言模型数据的向量数据库,用于存储和快速搜索模型embedding后的向量,可用于基于个人知识库的大模型应用",
"Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库",
"vearch 是基于C语言,go语言开发的,并提供python接口,可以直接通过pip安装",
]
vearch_source = [
{
"source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/tlbb/three_body.txt"
},
{
"source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/tlbb/three_body.txt"
},
{
"source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/tlbb/three_body.txt"
},
]
vearch_standalone.add_texts(vearch_info, vearch_source)
print("*****************after is cluster res********************")
vearch_cluster.add_texts(vearch_info, vearch_source)query3 = "你知道vearch是什么吗?"
res1 = vearch_standalone.similarity_search(query3, 3)
for idx, tmp in enumerate(res1):
print(f"{'#'*20}第{idx+1}段相关文档{'#'*20}\n\n{tmp.page_content}\n")
context1 = "".join([tmp.page_content for tmp in res1])
new_query1 = f"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\n {context1} \n 回答用户这个问题:{query3}\n\n"
response, history = model.chat(tokenizer, new_query1, history=[])
print(f"***************ChatGLM:{response}\n")
print("***************after is cluster res******************")
query3_c = "你知道vearch是什么吗?"
res1_c = vearch_standalone.similarity_search(query3_c, 3)
for idx, tmp in enumerate(res1_c):
print(f"{'#'*20}第{idx+1}段相关文档{'#'*20}\n\n{tmp.page_content}\n")
context1_C = "".join([tmp.page_content for tmp in res1_c])
new_query1_c = f"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\n {context1_C} \n 回答用户这个问题:{query3_c}\n\n"
response_c, history_c = model.chat(tokenizer, new_query1_c, history=[])
print(f"***************ChatGLM:{response_c}\n")##delete and get function need to maintian docids
##your docid
res_d = vearch_standalone.delete(
[
"eee5e7468434427eb49829374c1e8220",
"caf86f4uutaoxfysmf7anj01xl6sv3ps",
"74t3tndxag9o7h0890bnpfzh4olk2h9x",
]
)
print("delete vearch standalone docid", res_d)
query = "你知道vearch是什么吗?"
response, history = model.chat(tokenizer, query, history=[])
print(f"Human: {query}\nChatGLM:{response}\n")
res_cluster = vearch_cluster.delete(
["-4311783201092343475", "-2899734009733762895", "1342026762029067927"]
)
print("delete vearch cluster docid", res_cluster)
query_c = "你知道vearch是什么吗?"
response_c, history = model.chat(tokenizer, query_c, history=[])
print(f"Human: {query}\nChatGLM:{response_c}\n")
get_delet_doc = vearch_standalone.get(
[
"eee5e7468434427eb49829374c1e8220",
"caf86f4uutaoxfysmf7anj01xl6sv3ps",
"74t3tndxag9o7h0890bnpfzh4olk2h9x",
]
)
print("after delete docid to query again:", get_delet_doc)
get_id_doc = vearch_standalone.get(
[
"18ce6747dca04a2c833e60e8dfd83c04",
"aafacb0e46574b378a9f433877ab06a8",
"ax5kh6jaqkcd2tiexxs8v6xjo8yv8a6b",
"74t3tndxag9o7h0890bnpfzh4olk2h9x",
]
)
print("get existed docid", get_id_doc)
get_delet_doc = vearch_cluster.get(
["-4311783201092343475", "-2899734009733762895", "1342026762029067927"]
)
print("after delete docid to query again:", get_delet_doc)
get_id_doc = vearch_cluster.get(
[
"1841638988191686991",
"-4519586577642625749",
"5028230008472292907",
"1342026762029067927",
]
)
print("get existed docid", get_id_doc) | true | from langchain.document_loaders import TextLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from transformers import AutoModel, AutoTokenizer
from langchain.vectorstores.vearch import Vearch
# repalce to your local model path
model_path = "/data/zhx/zhx/langchain-ChatGLM_new/chatglm2-6b"
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModel.from_pretrained(model_path, trust_remote_code=True).half().cuda(0)query = "你好!"
response, history = model.chat(tokenizer, query, history=[])
print(f"Human: {query}\nChatGLM:{response}\n")
query = "你知道凌波微步吗,你知道都有谁学会了吗?"
response, history = model.chat(tokenizer, query, history=history)
print(f"Human: {query}\nChatGLM:{response}\n")# Add your local knowledge files
file_path = "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/天龙八部/lingboweibu.txt" # Your local file path"
loader = TextLoader(file_path, encoding="utf-8")
documents = loader.load()
# split text into sentences and embedding the sentences
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
texts = text_splitter.split_documents(documents)
# replace to your model path
embedding_path = "/data/zhx/zhx/langchain-ChatGLM_new/text2vec/text2vec-large-chinese"
embeddings = HuggingFaceEmbeddings(model_name=embedding_path)# first add your document into vearch vectorstore
vearch_standalone = Vearch.from_documents(
texts,
embeddings,
path_or_url="/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/localdb_new_test",
table_name="localdb_new_test",
flag=0,
)
print("***************after is cluster res*****************")
vearch_cluster = Vearch.from_documents(
texts,
embeddings,
path_or_url="http://test-vearch-langchain-router.vectorbase.svc.ht1.n.jd.local",
db_name="vearch_cluster_langchian",
table_name="tobenumone",
flag=1,
)query = "你知道凌波微步吗,你知道都有谁会凌波微步?"
vearch_standalone_res = vearch_standalone.similarity_search(query, 3)
for idx, tmp in enumerate(vearch_standalone_res):
print(f"{'#'*20}第{idx+1}段相关文档{'#'*20}\n\n{tmp.page_content}\n")
# combine your local knowleadge and query
context = "".join([tmp.page_content for tmp in vearch_standalone_res])
new_query = f"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\n {context} \n 回答用户这个问题:{query}\n\n"
response, history = model.chat(tokenizer, new_query, history=[])
print(f"********ChatGLM:{response}\n")
print("***************************after is cluster res******************************")
query_c = "你知道凌波微步吗,你知道都有谁会凌波微步?"
cluster_res = vearch_cluster.similarity_search(query_c, 3)
for idx, tmp in enumerate(cluster_res):
print(f"{'#'*20}第{idx+1}段相关文档{'#'*20}\n\n{tmp.page_content}\n")
# combine your local knowleadge and query
context_c = "".join([tmp.page_content for tmp in cluster_res])
new_query_c = f"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\n {context_c} \n 回答用户这个问题:{query_c}\n\n"
response_c, history_c = model.chat(tokenizer, new_query_c, history=[])
print(f"********ChatGLM:{response_c}\n")query = "你知道vearch是什么吗?"
response, history = model.chat(tokenizer, query, history=history)
print(f"Human: {query}\nChatGLM:{response}\n")
vearch_info = [
"Vearch 是一款存储大语言模型数据的向量数据库,用于存储和快速搜索模型embedding后的向量,可用于基于个人知识库的大模型应用",
"Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库",
"vearch 是基于C语言,go语言开发的,并提供python接口,可以直接通过pip安装",
]
vearch_source = [
{
"source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/tlbb/three_body.txt"
},
{
"source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/tlbb/three_body.txt"
},
{
"source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/tlbb/three_body.txt"
},
]
vearch_standalone.add_texts(vearch_info, vearch_source)
print("*****************after is cluster res********************")
vearch_cluster.add_texts(vearch_info, vearch_source)query3 = "你知道vearch是什么吗?"
res1 = vearch_standalone.similarity_search(query3, 3)
for idx, tmp in enumerate(res1):
print(f"{'#'*20}第{idx+1}段相关文档{'#'*20}\n\n{tmp.page_content}\n")
context1 = "".join([tmp.page_content for tmp in res1])
new_query1 = f"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\n {context1} \n 回答用户这个问题:{query3}\n\n"
response, history = model.chat(tokenizer, new_query1, history=[])
print(f"***************ChatGLM:{response}\n")
print("***************after is cluster res******************")
query3_c = "你知道vearch是什么吗?"
res1_c = vearch_standalone.similarity_search(query3_c, 3)
for idx, tmp in enumerate(res1_c):
print(f"{'#'*20}第{idx+1}段相关文档{'#'*20}\n\n{tmp.page_content}\n")
context1_C = "".join([tmp.page_content for tmp in res1_c])
new_query1_c = f"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\n {context1_C} \n 回答用户这个问题:{query3_c}\n\n"
response_c, history_c = model.chat(tokenizer, new_query1_c, history=[])
print(f"***************ChatGLM:{response_c}\n")##delete and get function need to maintian docids
##your docid
res_d = vearch_standalone.delete(
[
"eee5e7468434427eb49829374c1e8220",
"PI:KEY:caf86f4uutaoxfysmf7anj01xl6sv3psEND_PI",
"PI:KEY:74t3tndxag9o7h0890bnpfzh4olk2h9xEND_PI",
]
)
print("delete vearch standalone docid", res_d)
query = "你知道vearch是什么吗?"
response, history = model.chat(tokenizer, query, history=[])
print(f"Human: {query}\nChatGLM:{response}\n")
res_cluster = vearch_cluster.delete(
["-4311783201092343475", "-2899734009733762895", "1342026762029067927"]
)
print("delete vearch cluster docid", res_cluster)
query_c = "你知道vearch是什么吗?"
response_c, history = model.chat(tokenizer, query_c, history=[])
print(f"Human: {query}\nChatGLM:{response_c}\n")
get_delet_doc = vearch_standalone.get(
[
"eee5e7468434427eb49829374c1e8220",
"PI:KEY:caf86f4uutaoxfysmf7anj01xl6sv3psEND_PI",
"PI:KEY:74t3tndxag9o7h0890bnpfzh4olk2h9xEND_PI",
]
)
print("after delete docid to query again:", get_delet_doc)
get_id_doc = vearch_standalone.get(
[
"18ce6747dca04a2c833e60e8dfd83c04",
"aafacb0e46574b378a9f433877ab06a8",
"PI:KEY:ax5kh6jaqkcd2tiexxs8v6xjo8yv8a6bEND_PI",
"PI:KEY:74t3tndxag9o7h0890bnpfzh4olk2h9xEND_PI",
]
)
print("get existed docid", get_id_doc)
get_delet_doc = vearch_cluster.get(
["-4311783201092343475", "-2899734009733762895", "1342026762029067927"]
)
print("after delete docid to query again:", get_delet_doc)
get_id_doc = vearch_cluster.get(
[
"1841638988191686991",
"-4519586577642625749",
"5028230008472292907",
"1342026762029067927",
]
)
print("get existed docid", get_id_doc) |
hf_public_repos/langchain-ai/langchain/libs/langchain/langchain | hf_public_repos/langchain-ai/langchain/libs/langchain/langchain/retrievers/you.py | from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.pydantic_v1 import root_validator
from langchain.schema import BaseRetriever, Document
from langchain.utils import get_from_dict_or_env
class YouRetriever(BaseRetriever):
"""`You` retriever that uses You.com's search API.
To connect to the You.com api requires an API key which
you can get by emailing api@you.com.
You can check out our docs at https://documentation.you.com.
You need to set the environment variable `YDC_API_KEY` for retriever to operate.
"""
ydc_api_key: str
k: Optional[int] = None
endpoint_type: str = "web"
@root_validator(pre=True)
def validate_client(
cls,
values: Dict[str, Any],
) -> Dict[str, Any]:
values["ydc_api_key"] = get_from_dict_or_env(
values, "ydc_api_key", "YDC_API_KEY"
)
return values
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
import requests
headers = {"X-API-Key": self.ydc_api_key}
if self.endpoint_type == "web":
results = requests.get(
f"https://api.ydc-index.io/search?query={query}",
headers=headers,
).json()
docs = []
for hit in results["hits"]:
for snippet in hit["snippets"]:
docs.append(Document(page_content=snippet))
if self.k is not None and len(docs) >= self.k:
return docs
return docs
elif self.endpoint_type == "snippet":
results = requests.get(
f"https://api.ydc-index.io/snippet_search?query={query}",
headers=headers,
).json()
return [Document(page_content=snippet) for snippet in results]
else:
raise RuntimeError(f"Invalid endpoint type provided {self.endpoint_type}")
| 0 | 2,956 | [{"tag": "EMAIL", "value": "api@you.com", "start": 449, "end": 460}] | true | 1 | from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.pydantic_v1 import root_validator
from langchain.schema import BaseRetriever, Document
from langchain.utils import get_from_dict_or_env
class YouRetriever(BaseRetriever):
"""`You` retriever that uses You.com's search API.
To connect to the You.com api requires an API key which
you can get by emailing kenaa@example.com.
You can check out our docs at https://documentation.you.com.
You need to set the environment variable `YDC_API_KEY` for retriever to operate.
"""
ydc_api_key: str
k: Optional[int] = None
endpoint_type: str = "web"
@root_validator(pre=True)
def validate_client(
cls,
values: Dict[str, Any],
) -> Dict[str, Any]:
values["ydc_api_key"] = get_from_dict_or_env(
values, "ydc_api_key", "YDC_API_KEY"
)
return values
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
import requests
headers = {"X-API-Key": self.ydc_api_key}
if self.endpoint_type == "web":
results = requests.get(
f"https://api.ydc-index.io/search?query={query}",
headers=headers,
).json()
docs = []
for hit in results["hits"]:
for snippet in hit["snippets"]:
docs.append(Document(page_content=snippet))
if self.k is not None and len(docs) >= self.k:
return docs
return docs
elif self.endpoint_type == "snippet":
results = requests.get(
f"https://api.ydc-index.io/snippet_search?query={query}",
headers=headers,
).json()
return [Document(page_content=snippet) for snippet in results]
else:
raise RuntimeError(f"Invalid endpoint type provided {self.endpoint_type}")
| true | from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.pydantic_v1 import root_validator
from langchain.schema import BaseRetriever, Document
from langchain.utils import get_from_dict_or_env
class YouRetriever(BaseRetriever):
"""`You` retriever that uses You.com's search API.
To connect to the You.com api requires an API key which
you can get by emailing PI:EMAIL:kenaa@example.comEND_PI.
You can check out our docs at https://documentation.you.com.
You need to set the environment variable `YDC_API_KEY` for retriever to operate.
"""
ydc_api_key: str
k: Optional[int] = None
endpoint_type: str = "web"
@root_validator(pre=True)
def validate_client(
cls,
values: Dict[str, Any],
) -> Dict[str, Any]:
values["ydc_api_key"] = get_from_dict_or_env(
values, "ydc_api_key", "YDC_API_KEY"
)
return values
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
import requests
headers = {"X-API-Key": self.ydc_api_key}
if self.endpoint_type == "web":
results = requests.get(
f"https://api.ydc-index.io/search?query={query}",
headers=headers,
).json()
docs = []
for hit in results["hits"]:
for snippet in hit["snippets"]:
docs.append(Document(page_content=snippet))
if self.k is not None and len(docs) >= self.k:
return docs
return docs
elif self.endpoint_type == "snippet":
results = requests.get(
f"https://api.ydc-index.io/snippet_search?query={query}",
headers=headers,
).json()
return [Document(page_content=snippet) for snippet in results]
else:
raise RuntimeError(f"Invalid endpoint type provided {self.endpoint_type}")
|
hf_public_repos/langchain-ai/langchain/libs/langchain/tests/unit_tests | hf_public_repos/langchain-ai/langchain/libs/langchain/tests/unit_tests/document_loaders/test_git.py | import os
import py
import pytest
from langchain.document_loaders import GitLoader
def init_repo(tmpdir: py.path.local, dir_name: str) -> str:
from git import Repo
repo_dir = tmpdir.mkdir(dir_name)
repo = Repo.init(repo_dir)
git = repo.git
git.checkout(b="main")
git.config("user.name", "Test User")
git.config("user.email", "test@example.com")
sample_file = "file.txt"
with open(os.path.join(repo_dir, sample_file), "w") as f:
f.write("content")
git.add([sample_file])
git.commit(m="Initial commit")
return str(repo_dir)
@pytest.mark.requires("git")
def test_load_twice(tmpdir: py.path.local) -> None:
"""
Test that loading documents twice from the same repository does not raise an error.
"""
clone_url = init_repo(tmpdir, "remote_repo")
repo_path = tmpdir.mkdir("local_repo").strpath
loader = GitLoader(repo_path=repo_path, clone_url=clone_url)
documents = loader.load()
assert len(documents) == 1
documents = loader.load()
assert len(documents) == 1
@pytest.mark.requires("git")
def test_clone_different_repo(tmpdir: py.path.local) -> None:
"""
Test that trying to clone a different repository into a directory already
containing a clone raises a ValueError.
"""
clone_url = init_repo(tmpdir, "remote_repo")
repo_path = tmpdir.mkdir("local_repo").strpath
loader = GitLoader(repo_path=repo_path, clone_url=clone_url)
documents = loader.load()
assert len(documents) == 1
other_clone_url = init_repo(tmpdir, "other_remote_repo")
other_loader = GitLoader(repo_path=repo_path, clone_url=other_clone_url)
with pytest.raises(ValueError):
other_loader.load()
| 0 | 1,938 | [{"tag": "EMAIL", "value": "test@example.com", "start": 360, "end": 376}] | true | 1 | import os
import py
import pytest
from langchain.document_loaders import GitLoader
def init_repo(tmpdir: py.path.local, dir_name: str) -> str:
from git import Repo
repo_dir = tmpdir.mkdir(dir_name)
repo = Repo.init(repo_dir)
git = repo.git
git.checkout(b="main")
git.config("user.name", "Test User")
git.config("user.email", "ychag@example.com")
sample_file = "file.txt"
with open(os.path.join(repo_dir, sample_file), "w") as f:
f.write("content")
git.add([sample_file])
git.commit(m="Initial commit")
return str(repo_dir)
@pytest.mark.requires("git")
def test_load_twice(tmpdir: py.path.local) -> None:
"""
Test that loading documents twice from the same repository does not raise an error.
"""
clone_url = init_repo(tmpdir, "remote_repo")
repo_path = tmpdir.mkdir("local_repo").strpath
loader = GitLoader(repo_path=repo_path, clone_url=clone_url)
documents = loader.load()
assert len(documents) == 1
documents = loader.load()
assert len(documents) == 1
@pytest.mark.requires("git")
def test_clone_different_repo(tmpdir: py.path.local) -> None:
"""
Test that trying to clone a different repository into a directory already
containing a clone raises a ValueError.
"""
clone_url = init_repo(tmpdir, "remote_repo")
repo_path = tmpdir.mkdir("local_repo").strpath
loader = GitLoader(repo_path=repo_path, clone_url=clone_url)
documents = loader.load()
assert len(documents) == 1
other_clone_url = init_repo(tmpdir, "other_remote_repo")
other_loader = GitLoader(repo_path=repo_path, clone_url=other_clone_url)
with pytest.raises(ValueError):
other_loader.load()
| true | import os
import py
import pytest
from langchain.document_loaders import GitLoader
def init_repo(tmpdir: py.path.local, dir_name: str) -> str:
from git import Repo
repo_dir = tmpdir.mkdir(dir_name)
repo = Repo.init(repo_dir)
git = repo.git
git.checkout(b="main")
git.config("user.name", "Test User")
git.config("user.email", "PI:EMAIL:ychag@example.comEND_PI")
sample_file = "file.txt"
with open(os.path.join(repo_dir, sample_file), "w") as f:
f.write("content")
git.add([sample_file])
git.commit(m="Initial commit")
return str(repo_dir)
@pytest.mark.requires("git")
def test_load_twice(tmpdir: py.path.local) -> None:
"""
Test that loading documents twice from the same repository does not raise an error.
"""
clone_url = init_repo(tmpdir, "remote_repo")
repo_path = tmpdir.mkdir("local_repo").strpath
loader = GitLoader(repo_path=repo_path, clone_url=clone_url)
documents = loader.load()
assert len(documents) == 1
documents = loader.load()
assert len(documents) == 1
@pytest.mark.requires("git")
def test_clone_different_repo(tmpdir: py.path.local) -> None:
"""
Test that trying to clone a different repository into a directory already
containing a clone raises a ValueError.
"""
clone_url = init_repo(tmpdir, "remote_repo")
repo_path = tmpdir.mkdir("local_repo").strpath
loader = GitLoader(repo_path=repo_path, clone_url=clone_url)
documents = loader.load()
assert len(documents) == 1
other_clone_url = init_repo(tmpdir, "other_remote_repo")
other_loader = GitLoader(repo_path=repo_path, clone_url=other_clone_url)
with pytest.raises(ValueError):
other_loader.load()
|
hf_public_repos/langchain-ai/langchain/libs/langchain/tests/integration_tests | hf_public_repos/langchain-ai/langchain/libs/langchain/tests/integration_tests/vectorstores/test_atlas.py | """Test Atlas functionality."""
import time
from langchain.vectorstores import AtlasDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
ATLAS_TEST_API_KEY = "7xDPkYXSYDc1_ErdTPIcoAR9RNd8YDlkS3nVNXcVoIMZ6"
def test_atlas() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = AtlasDB.from_texts(
name="langchain_test_project" + str(time.time()),
texts=texts,
api_key=ATLAS_TEST_API_KEY,
embedding=FakeEmbeddings(),
)
output = docsearch.similarity_search("foo", k=1)
assert len(output) == 1
assert output[0].page_content == "foo"
def test_atlas_with_metadatas() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = AtlasDB.from_texts(
name="langchain_test_project" + str(time.time()),
texts=texts,
api_key=ATLAS_TEST_API_KEY,
embedding=FakeEmbeddings(),
metadatas=metadatas,
reset_project_if_exists=True,
)
output = docsearch.similarity_search("foo", k=1)
assert len(output) == 1
assert output[0].page_content == "foo"
assert output[0].metadata["page"] == "0"
| 0 | 1,441 | [{"tag": "KEY", "value": "7xDPkYXSYDc1_ErdTPIcoAR9RNd8YDlkS3nVNXcVoIMZ6", "start": 191, "end": 236}] | true | 1 | """Test Atlas functionality."""
import time
from langchain.vectorstores import AtlasDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
ATLAS_TEST_API_KEY = "ax5kh6jaqkcd2tiexxs8v6xjo8yv8a6b"
def test_atlas() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = AtlasDB.from_texts(
name="langchain_test_project" + str(time.time()),
texts=texts,
api_key=ATLAS_TEST_API_KEY,
embedding=FakeEmbeddings(),
)
output = docsearch.similarity_search("foo", k=1)
assert len(output) == 1
assert output[0].page_content == "foo"
def test_atlas_with_metadatas() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = AtlasDB.from_texts(
name="langchain_test_project" + str(time.time()),
texts=texts,
api_key=ATLAS_TEST_API_KEY,
embedding=FakeEmbeddings(),
metadatas=metadatas,
reset_project_if_exists=True,
)
output = docsearch.similarity_search("foo", k=1)
assert len(output) == 1
assert output[0].page_content == "foo"
assert output[0].metadata["page"] == "0"
| true | """Test Atlas functionality."""
import time
from langchain.vectorstores import AtlasDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
ATLAS_TEST_API_KEY = "PI:KEY:ax5kh6jaqkcd2tiexxs8v6xjo8yv8a6bEND_PI"
def test_atlas() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = AtlasDB.from_texts(
name="langchain_test_project" + str(time.time()),
texts=texts,
api_key=ATLAS_TEST_API_KEY,
embedding=FakeEmbeddings(),
)
output = docsearch.similarity_search("foo", k=1)
assert len(output) == 1
assert output[0].page_content == "foo"
def test_atlas_with_metadatas() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = AtlasDB.from_texts(
name="langchain_test_project" + str(time.time()),
texts=texts,
api_key=ATLAS_TEST_API_KEY,
embedding=FakeEmbeddings(),
metadatas=metadatas,
reset_project_if_exists=True,
)
output = docsearch.similarity_search("foo", k=1)
assert len(output) == 1
assert output[0].page_content == "foo"
assert output[0].metadata["page"] == "0"
|
hf_public_repos/langchain-ai/langchain/libs/langchain/langchain | hf_public_repos/langchain-ai/langchain/libs/langchain/langchain/utilities/pubmed.py | import json
import logging
import time
import urllib.error
import urllib.parse
import urllib.request
from typing import Any, Dict, Iterator, List
from langchain.pydantic_v1 import BaseModel, root_validator
from langchain.schema import Document
logger = logging.getLogger(__name__)
class PubMedAPIWrapper(BaseModel):
"""
Wrapper around PubMed API.
This wrapper will use the PubMed API to conduct searches and fetch
document summaries. By default, it will return the document summaries
of the top-k results of an input search.
Parameters:
top_k_results: number of the top-scored document used for the PubMed tool
MAX_QUERY_LENGTH: maximum length of the query.
Default is 300 characters.
doc_content_chars_max: maximum length of the document content.
Content will be truncated if it exceeds this length.
Default is 2000 characters.
max_retry: maximum number of retries for a request. Default is 5.
sleep_time: time to wait between retries.
Default is 0.2 seconds.
email: email address to be used for the PubMed API.
"""
parse: Any #: :meta private:
base_url_esearch: str = (
"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?"
)
base_url_efetch: str = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?"
max_retry: int = 5
sleep_time: float = 0.2
# Default values for the parameters
top_k_results: int = 3
MAX_QUERY_LENGTH: int = 300
doc_content_chars_max: int = 2000
email: str = "your_email@example.com"
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
try:
import xmltodict
values["parse"] = xmltodict.parse
except ImportError:
raise ImportError(
"Could not import xmltodict python package. "
"Please install it with `pip install xmltodict`."
)
return values
def run(self, query: str) -> str:
"""
Run PubMed search and get the article meta information.
See https://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ESearch
It uses only the most informative fields of article meta information.
"""
try:
# Retrieve the top-k results for the query
docs = [
f"Published: {result['Published']}\n"
f"Title: {result['Title']}\n"
f"Copyright Information: {result['Copyright Information']}\n"
f"Summary::\n{result['Summary']}"
for result in self.load(query[: self.MAX_QUERY_LENGTH])
]
# Join the results and limit the character count
return (
"\n\n".join(docs)[: self.doc_content_chars_max]
if docs
else "No good PubMed Result was found"
)
except Exception as ex:
return f"PubMed exception: {ex}"
def lazy_load(self, query: str) -> Iterator[dict]:
"""
Search PubMed for documents matching the query.
Return an iterator of dictionaries containing the document metadata.
"""
url = (
self.base_url_esearch
+ "db=pubmed&term="
+ str({urllib.parse.quote(query)})
+ f"&retmode=json&retmax={self.top_k_results}&usehistory=y"
)
result = urllib.request.urlopen(url)
text = result.read().decode("utf-8")
json_text = json.loads(text)
webenv = json_text["esearchresult"]["webenv"]
for uid in json_text["esearchresult"]["idlist"]:
yield self.retrieve_article(uid, webenv)
def load(self, query: str) -> List[dict]:
"""
Search PubMed for documents matching the query.
Return a list of dictionaries containing the document metadata.
"""
return list(self.lazy_load(query))
def _dict2document(self, doc: dict) -> Document:
summary = doc.pop("Summary")
return Document(page_content=summary, metadata=doc)
def lazy_load_docs(self, query: str) -> Iterator[Document]:
for d in self.lazy_load(query=query):
yield self._dict2document(d)
def load_docs(self, query: str) -> List[Document]:
return list(self.lazy_load_docs(query=query))
def retrieve_article(self, uid: str, webenv: str) -> dict:
url = (
self.base_url_efetch
+ "db=pubmed&retmode=xml&id="
+ uid
+ "&webenv="
+ webenv
)
retry = 0
while True:
try:
result = urllib.request.urlopen(url)
break
except urllib.error.HTTPError as e:
if e.code == 429 and retry < self.max_retry:
# Too Many Requests errors
# wait for an exponentially increasing amount of time
print(
f"Too Many Requests, "
f"waiting for {self.sleep_time:.2f} seconds..."
)
time.sleep(self.sleep_time)
self.sleep_time *= 2
retry += 1
else:
raise e
xml_text = result.read().decode("utf-8")
text_dict = self.parse(xml_text)
return self._parse_article(uid, text_dict)
def _parse_article(self, uid: str, text_dict: dict) -> dict:
try:
ar = text_dict["PubmedArticleSet"]["PubmedArticle"]["MedlineCitation"][
"Article"
]
except KeyError:
ar = text_dict["PubmedArticleSet"]["PubmedBookArticle"]["BookDocument"]
abstract_text = ar.get("Abstract", {}).get("AbstractText", [])
summaries = [
f"{txt['@Label']}: {txt['#text']}"
for txt in abstract_text
if "#text" in txt and "@Label" in txt
]
summary = (
"\n".join(summaries)
if summaries
else (
abstract_text
if isinstance(abstract_text, str)
else (
"\n".join(str(value) for value in abstract_text.values())
if isinstance(abstract_text, dict)
else "No abstract available"
)
)
)
a_d = ar.get("ArticleDate", {})
pub_date = "-".join(
[a_d.get("Year", ""), a_d.get("Month", ""), a_d.get("Day", "")]
)
return {
"uid": uid,
"Title": ar.get("ArticleTitle", ""),
"Published": pub_date,
"Copyright Information": ar.get("Abstract", {}).get(
"CopyrightInformation", ""
),
"Summary": summary,
}
| 0 | 2,689 | [{"tag": "EMAIL", "value": "your_email@example.com", "start": 1576, "end": 1598}] | true | 1 | import json
import logging
import time
import urllib.error
import urllib.parse
import urllib.request
from typing import Any, Dict, Iterator, List
from langchain.pydantic_v1 import BaseModel, root_validator
from langchain.schema import Document
logger = logging.getLogger(__name__)
class PubMedAPIWrapper(BaseModel):
"""
Wrapper around PubMed API.
This wrapper will use the PubMed API to conduct searches and fetch
document summaries. By default, it will return the document summaries
of the top-k results of an input search.
Parameters:
top_k_results: number of the top-scored document used for the PubMed tool
MAX_QUERY_LENGTH: maximum length of the query.
Default is 300 characters.
doc_content_chars_max: maximum length of the document content.
Content will be truncated if it exceeds this length.
Default is 2000 characters.
max_retry: maximum number of retries for a request. Default is 5.
sleep_time: time to wait between retries.
Default is 0.2 seconds.
email: email address to be used for the PubMed API.
"""
parse: Any #: :meta private:
base_url_esearch: str = (
"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?"
)
base_url_efetch: str = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?"
max_retry: int = 5
sleep_time: float = 0.2
# Default values for the parameters
top_k_results: int = 3
MAX_QUERY_LENGTH: int = 300
doc_content_chars_max: int = 2000
email: str = "anpch@example.com"
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
try:
import xmltodict
values["parse"] = xmltodict.parse
except ImportError:
raise ImportError(
"Could not import xmltodict python package. "
"Please install it with `pip install xmltodict`."
)
return values
def run(self, query: str) -> str:
"""
Run PubMed search and get the article meta information.
See https://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ESearch
It uses only the most informative fields of article meta information.
"""
try:
# Retrieve the top-k results for the query
docs = [
f"Published: {result['Published']}\n"
f"Title: {result['Title']}\n"
f"Copyright Information: {result['Copyright Information']}\n"
f"Summary::\n{result['Summary']}"
for result in self.load(query[: self.MAX_QUERY_LENGTH])
]
# Join the results and limit the character count
return (
"\n\n".join(docs)[: self.doc_content_chars_max]
if docs
else "No good PubMed Result was found"
)
except Exception as ex:
return f"PubMed exception: {ex}"
def lazy_load(self, query: str) -> Iterator[dict]:
"""
Search PubMed for documents matching the query.
Return an iterator of dictionaries containing the document metadata.
"""
url = (
self.base_url_esearch
+ "db=pubmed&term="
+ str({urllib.parse.quote(query)})
+ f"&retmode=json&retmax={self.top_k_results}&usehistory=y"
)
result = urllib.request.urlopen(url)
text = result.read().decode("utf-8")
json_text = json.loads(text)
webenv = json_text["esearchresult"]["webenv"]
for uid in json_text["esearchresult"]["idlist"]:
yield self.retrieve_article(uid, webenv)
def load(self, query: str) -> List[dict]:
"""
Search PubMed for documents matching the query.
Return a list of dictionaries containing the document metadata.
"""
return list(self.lazy_load(query))
def _dict2document(self, doc: dict) -> Document:
summary = doc.pop("Summary")
return Document(page_content=summary, metadata=doc)
def lazy_load_docs(self, query: str) -> Iterator[Document]:
for d in self.lazy_load(query=query):
yield self._dict2document(d)
def load_docs(self, query: str) -> List[Document]:
return list(self.lazy_load_docs(query=query))
def retrieve_article(self, uid: str, webenv: str) -> dict:
url = (
self.base_url_efetch
+ "db=pubmed&retmode=xml&id="
+ uid
+ "&webenv="
+ webenv
)
retry = 0
while True:
try:
result = urllib.request.urlopen(url)
break
except urllib.error.HTTPError as e:
if e.code == 429 and retry < self.max_retry:
# Too Many Requests errors
# wait for an exponentially increasing amount of time
print(
f"Too Many Requests, "
f"waiting for {self.sleep_time:.2f} seconds..."
)
time.sleep(self.sleep_time)
self.sleep_time *= 2
retry += 1
else:
raise e
xml_text = result.read().decode("utf-8")
text_dict = self.parse(xml_text)
return self._parse_article(uid, text_dict)
def _parse_article(self, uid: str, text_dict: dict) -> dict:
try:
ar = text_dict["PubmedArticleSet"]["PubmedArticle"]["MedlineCitation"][
"Article"
]
except KeyError:
ar = text_dict["PubmedArticleSet"]["PubmedBookArticle"]["BookDocument"]
abstract_text = ar.get("Abstract", {}).get("AbstractText", [])
summaries = [
f"{txt['@Label']}: {txt['#text']}"
for txt in abstract_text
if "#text" in txt and "@Label" in txt
]
summary = (
"\n".join(summaries)
if summaries
else (
abstract_text
if isinstance(abstract_text, str)
else (
"\n".join(str(value) for value in abstract_text.values())
if isinstance(abstract_text, dict)
else "No abstract available"
)
)
)
a_d = ar.get("ArticleDate", {})
pub_date = "-".join(
[a_d.get("Year", ""), a_d.get("Month", ""), a_d.get("Day", "")]
)
return {
"uid": uid,
"Title": ar.get("ArticleTitle", ""),
"Published": pub_date,
"Copyright Information": ar.get("Abstract", {}).get(
"CopyrightInformation", ""
),
"Summary": summary,
}
| true | import json
import logging
import time
import urllib.error
import urllib.parse
import urllib.request
from typing import Any, Dict, Iterator, List
from langchain.pydantic_v1 import BaseModel, root_validator
from langchain.schema import Document
logger = logging.getLogger(__name__)
class PubMedAPIWrapper(BaseModel):
"""
Wrapper around PubMed API.
This wrapper will use the PubMed API to conduct searches and fetch
document summaries. By default, it will return the document summaries
of the top-k results of an input search.
Parameters:
top_k_results: number of the top-scored document used for the PubMed tool
MAX_QUERY_LENGTH: maximum length of the query.
Default is 300 characters.
doc_content_chars_max: maximum length of the document content.
Content will be truncated if it exceeds this length.
Default is 2000 characters.
max_retry: maximum number of retries for a request. Default is 5.
sleep_time: time to wait between retries.
Default is 0.2 seconds.
email: email address to be used for the PubMed API.
"""
parse: Any #: :meta private:
base_url_esearch: str = (
"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?"
)
base_url_efetch: str = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?"
max_retry: int = 5
sleep_time: float = 0.2
# Default values for the parameters
top_k_results: int = 3
MAX_QUERY_LENGTH: int = 300
doc_content_chars_max: int = 2000
email: str = "PI:EMAIL:anpch@example.comEND_PI"
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
try:
import xmltodict
values["parse"] = xmltodict.parse
except ImportError:
raise ImportError(
"Could not import xmltodict python package. "
"Please install it with `pip install xmltodict`."
)
return values
def run(self, query: str) -> str:
"""
Run PubMed search and get the article meta information.
See https://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ESearch
It uses only the most informative fields of article meta information.
"""
try:
# Retrieve the top-k results for the query
docs = [
f"Published: {result['Published']}\n"
f"Title: {result['Title']}\n"
f"Copyright Information: {result['Copyright Information']}\n"
f"Summary::\n{result['Summary']}"
for result in self.load(query[: self.MAX_QUERY_LENGTH])
]
# Join the results and limit the character count
return (
"\n\n".join(docs)[: self.doc_content_chars_max]
if docs
else "No good PubMed Result was found"
)
except Exception as ex:
return f"PubMed exception: {ex}"
def lazy_load(self, query: str) -> Iterator[dict]:
"""
Search PubMed for documents matching the query.
Return an iterator of dictionaries containing the document metadata.
"""
url = (
self.base_url_esearch
+ "db=pubmed&term="
+ str({urllib.parse.quote(query)})
+ f"&retmode=json&retmax={self.top_k_results}&usehistory=y"
)
result = urllib.request.urlopen(url)
text = result.read().decode("utf-8")
json_text = json.loads(text)
webenv = json_text["esearchresult"]["webenv"]
for uid in json_text["esearchresult"]["idlist"]:
yield self.retrieve_article(uid, webenv)
def load(self, query: str) -> List[dict]:
"""
Search PubMed for documents matching the query.
Return a list of dictionaries containing the document metadata.
"""
return list(self.lazy_load(query))
def _dict2document(self, doc: dict) -> Document:
summary = doc.pop("Summary")
return Document(page_content=summary, metadata=doc)
def lazy_load_docs(self, query: str) -> Iterator[Document]:
for d in self.lazy_load(query=query):
yield self._dict2document(d)
def load_docs(self, query: str) -> List[Document]:
return list(self.lazy_load_docs(query=query))
def retrieve_article(self, uid: str, webenv: str) -> dict:
url = (
self.base_url_efetch
+ "db=pubmed&retmode=xml&id="
+ uid
+ "&webenv="
+ webenv
)
retry = 0
while True:
try:
result = urllib.request.urlopen(url)
break
except urllib.error.HTTPError as e:
if e.code == 429 and retry < self.max_retry:
# Too Many Requests errors
# wait for an exponentially increasing amount of time
print(
f"Too Many Requests, "
f"waiting for {self.sleep_time:.2f} seconds..."
)
time.sleep(self.sleep_time)
self.sleep_time *= 2
retry += 1
else:
raise e
xml_text = result.read().decode("utf-8")
text_dict = self.parse(xml_text)
return self._parse_article(uid, text_dict)
def _parse_article(self, uid: str, text_dict: dict) -> dict:
try:
ar = text_dict["PubmedArticleSet"]["PubmedArticle"]["MedlineCitation"][
"Article"
]
except KeyError:
ar = text_dict["PubmedArticleSet"]["PubmedBookArticle"]["BookDocument"]
abstract_text = ar.get("Abstract", {}).get("AbstractText", [])
summaries = [
f"{txt['@Label']}: {txt['#text']}"
for txt in abstract_text
if "#text" in txt and "@Label" in txt
]
summary = (
"\n".join(summaries)
if summaries
else (
abstract_text
if isinstance(abstract_text, str)
else (
"\n".join(str(value) for value in abstract_text.values())
if isinstance(abstract_text, dict)
else "No abstract available"
)
)
)
a_d = ar.get("ArticleDate", {})
pub_date = "-".join(
[a_d.get("Year", ""), a_d.get("Month", ""), a_d.get("Day", "")]
)
return {
"uid": uid,
"Title": ar.get("ArticleTitle", ""),
"Published": pub_date,
"Copyright Information": ar.get("Abstract", {}).get(
"CopyrightInformation", ""
),
"Summary": summary,
}
|
hf_public_repos/langchain-ai/langchain/libs/langchain/tests/integration_tests | hf_public_repos/langchain-ai/langchain/libs/langchain/tests/integration_tests/document_loaders/test_mastodon.py | """Tests for the Mastodon toots loader"""
from langchain.document_loaders import MastodonTootsLoader
def test_mastodon_toots_loader() -> None:
"""Test Mastodon toots loader with an external query."""
# Query the Mastodon CEO's account
loader = MastodonTootsLoader(
mastodon_accounts=["@Gargron@mastodon.social"], number_toots=1
)
docs = loader.load()
assert len(docs) == 1
assert docs[0].metadata["user_info"]["id"] == 1
| 0 | 1,633 | [{"tag": "EMAIL", "value": "Gargron@mastodon.social", "start": 308, "end": 331}] | true | 1 | """Tests for the Mastodon toots loader"""
from langchain.document_loaders import MastodonTootsLoader
def test_mastodon_toots_loader() -> None:
"""Test Mastodon toots loader with an external query."""
# Query the Mastodon CEO's account
loader = MastodonTootsLoader(
mastodon_accounts=["@kenaa@example.com"], number_toots=1
)
docs = loader.load()
assert len(docs) == 1
assert docs[0].metadata["user_info"]["id"] == 1
| true | """Tests for the Mastodon toots loader"""
from langchain.document_loaders import MastodonTootsLoader
def test_mastodon_toots_loader() -> None:
"""Test Mastodon toots loader with an external query."""
# Query the Mastodon CEO's account
loader = MastodonTootsLoader(
mastodon_accounts=["@PI:EMAIL:kenaa@example.comEND_PI"], number_toots=1
)
docs = loader.load()
assert len(docs) == 1
assert docs[0].metadata["user_info"]["id"] == 1
|
hf_public_repos/zilliztech/GPTCache/tests/unit_tests | hf_public_repos/zilliztech/GPTCache/tests/unit_tests/adapter/test_langchain_models.py | import asyncio
import os
import random
from unittest.mock import patch
from gptcache import Cache, Config
from gptcache.adapter import openai
from gptcache.adapter.api import init_similar_cache, get
from gptcache.adapter.langchain_models import LangChainLLMs, LangChainChat, _cache_msg_data_convert
from gptcache.processor.pre import get_prompt, last_content_without_template, get_messages_last_content
from gptcache.utils import import_pydantic, import_langchain
from gptcache.utils.response import get_message_from_openai_answer
import_pydantic()
import_langchain()
from langchain import OpenAI, PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
def test_langchain_llms():
question = "test_langchain_llms"
expect_answer = "hello"
llm_cache = Cache()
llm_cache.init(
pre_embedding_func=get_prompt,
)
os.environ["OPENAI_API_KEY"] = "API"
langchain_openai = OpenAI(model_name="text-ada-001")
llm = LangChainLLMs(llm=langchain_openai,cache_obj=llm_cache)
assert str(langchain_openai) == str(llm)
with patch("openai.Completion.create") as mock_create:
mock_create.return_value = {
"choices": [
{
"finish_reason": "stop",
"index": 0,
"text": expect_answer,
}
],
"created": 1677825456,
"id": "chatcmpl-6ptKqrhgRoVchm58Bby0UvJzq2ZuQ",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion",
"usage": {
"completion_tokens": 301,
"prompt_tokens": 36,
"total_tokens": 337
}
}
answer = llm(prompt=question)
assert expect_answer == answer
answer = llm(prompt=question)
assert expect_answer == answer
def test_langchain_chats():
question = [HumanMessage(content="test_langchain_chats")]
question2 = [HumanMessage(content="test_langchain_chats2")]
msg = "chat models"
expect_answer = {
"role": "assistant",
"message": msg,
"content": msg,
}
llm_cache = Cache()
llm_cache.init(
pre_embedding_func=get_messages_last_content,
)
os.environ["OPENAI_API_KEY"] = "API"
langchain_openai = ChatOpenAI(temperature=0)
chat = LangChainChat(chat=langchain_openai,cache_obj=llm_cache)
assert chat.get_num_tokens("hello") == langchain_openai.get_num_tokens("hello")
assert chat.get_num_tokens_from_messages(messages=[HumanMessage(content="test_langchain_chats")]) \
== langchain_openai.get_num_tokens_from_messages(messages=[HumanMessage(content="test_langchain_chats")])
with patch("openai.ChatCompletion.create") as mock_create:
mock_create.return_value = {
"choices": [
{
"finish_reason": "stop",
"index": 0,
"message": expect_answer,
}
],
"delta": {"role": "assistant"},
"created": 1677825456,
"id": "chatcmpl-6ptKqrhgRoVchm58Bby0UvJzq2ZuQ",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion",
"usage": {
"completion_tokens": 301,
"prompt_tokens": 36,
"total_tokens": 337
}
}
answer = chat(messages=question)
assert answer == _cache_msg_data_convert(msg).generations[0].message
with patch("openai.ChatCompletion.acreate") as mock_create:
mock_create.return_value = {
"choices": [
{
"finish_reason": "stop",
"index": 0,
"message": expect_answer,
}
],
"delta": {"role": "assistant"},
"created": 1677825456,
"id": "chatcmpl-6ptKqrhgRoVchm58Bby0UvJzq2ZuQ",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion",
"usage": {
"completion_tokens": 301,
"prompt_tokens": 36,
"total_tokens": 337
}
}
answer = asyncio.run(chat.agenerate([question2]))
assert answer.generations[0][0].text == _cache_msg_data_convert(msg).generations[0].text
answer = chat(messages=question)
assert answer == _cache_msg_data_convert(msg).generations[0].message
answer = asyncio.run(chat.agenerate([question]))
assert answer.generations[0][0].text == _cache_msg_data_convert(msg).generations[0].text
answer = asyncio.run(chat.agenerate([question2]))
assert answer.generations[0][0].text == _cache_msg_data_convert(msg).generations[0].text
def test_last_content_without_template():
string_prompt = PromptTemplate.from_template("tell me a joke about {subject}")
template = string_prompt.template
cache_obj = Cache()
data_dir = str(random.random())
init_similar_cache(data_dir=data_dir, cache_obj=cache_obj, pre_func=last_content_without_template, config=Config(template=template))
subject_str = "animal"
expect_answer = "this is a joke"
with patch("openai.ChatCompletion.create") as mock_create:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_create.return_value = datas
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": string_prompt.format(subject=subject_str)},
],
cache_obj=cache_obj,
)
assert get_message_from_openai_answer(response) == expect_answer, response
cache_obj.flush()
init_similar_cache(data_dir=data_dir, cache_obj=cache_obj)
cache_res = get(str([subject_str]), cache_obj=cache_obj)
print(str([subject_str]))
assert cache_res == expect_answer, cache_res
| 0 | 859 | [{"tag": "KEY", "value": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 5596, "end": 5634}, {"tag": "KEY", "value": "chatcmpl-6ptKqrhgRoVchm58Bby0UvJzq2ZuQ", "start": 1446, "end": 1484}, {"tag": "KEY", "value": "chatcmpl-6ptKqrhgRoVchm58Bby0UvJzq2ZuQ", "start": 3146, "end": 3184}, {"tag": "KEY", "value": "chatcmpl-6ptKqrhgRoVchm58Bby0UvJzq2ZuQ", "start": 3951, "end": 3989}] | true | 4 | import asyncio
import os
import random
from unittest.mock import patch
from gptcache import Cache, Config
from gptcache.adapter import openai
from gptcache.adapter.api import init_similar_cache, get
from gptcache.adapter.langchain_models import LangChainLLMs, LangChainChat, _cache_msg_data_convert
from gptcache.processor.pre import get_prompt, last_content_without_template, get_messages_last_content
from gptcache.utils import import_pydantic, import_langchain
from gptcache.utils.response import get_message_from_openai_answer
import_pydantic()
import_langchain()
from langchain import OpenAI, PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
def test_langchain_llms():
question = "test_langchain_llms"
expect_answer = "hello"
llm_cache = Cache()
llm_cache.init(
pre_embedding_func=get_prompt,
)
os.environ["OPENAI_API_KEY"] = "API"
langchain_openai = OpenAI(model_name="text-ada-001")
llm = LangChainLLMs(llm=langchain_openai,cache_obj=llm_cache)
assert str(langchain_openai) == str(llm)
with patch("openai.Completion.create") as mock_create:
mock_create.return_value = {
"choices": [
{
"finish_reason": "stop",
"index": 0,
"text": expect_answer,
}
],
"created": 1677825456,
"id": "ax5kh6jaqkcd2tiexxs8v6xjo8yv8a6b",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion",
"usage": {
"completion_tokens": 301,
"prompt_tokens": 36,
"total_tokens": 337
}
}
answer = llm(prompt=question)
assert expect_answer == answer
answer = llm(prompt=question)
assert expect_answer == answer
def test_langchain_chats():
question = [HumanMessage(content="test_langchain_chats")]
question2 = [HumanMessage(content="test_langchain_chats2")]
msg = "chat models"
expect_answer = {
"role": "assistant",
"message": msg,
"content": msg,
}
llm_cache = Cache()
llm_cache.init(
pre_embedding_func=get_messages_last_content,
)
os.environ["OPENAI_API_KEY"] = "API"
langchain_openai = ChatOpenAI(temperature=0)
chat = LangChainChat(chat=langchain_openai,cache_obj=llm_cache)
assert chat.get_num_tokens("hello") == langchain_openai.get_num_tokens("hello")
assert chat.get_num_tokens_from_messages(messages=[HumanMessage(content="test_langchain_chats")]) \
== langchain_openai.get_num_tokens_from_messages(messages=[HumanMessage(content="test_langchain_chats")])
with patch("openai.ChatCompletion.create") as mock_create:
mock_create.return_value = {
"choices": [
{
"finish_reason": "stop",
"index": 0,
"message": expect_answer,
}
],
"delta": {"role": "assistant"},
"created": 1677825456,
"id": "ax5kh6jaqkcd2tiexxs8v6xjo8yv8a6b",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion",
"usage": {
"completion_tokens": 301,
"prompt_tokens": 36,
"total_tokens": 337
}
}
answer = chat(messages=question)
assert answer == _cache_msg_data_convert(msg).generations[0].message
with patch("openai.ChatCompletion.acreate") as mock_create:
mock_create.return_value = {
"choices": [
{
"finish_reason": "stop",
"index": 0,
"message": expect_answer,
}
],
"delta": {"role": "assistant"},
"created": 1677825456,
"id": "ax5kh6jaqkcd2tiexxs8v6xjo8yv8a6b",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion",
"usage": {
"completion_tokens": 301,
"prompt_tokens": 36,
"total_tokens": 337
}
}
answer = asyncio.run(chat.agenerate([question2]))
assert answer.generations[0][0].text == _cache_msg_data_convert(msg).generations[0].text
answer = chat(messages=question)
assert answer == _cache_msg_data_convert(msg).generations[0].message
answer = asyncio.run(chat.agenerate([question]))
assert answer.generations[0][0].text == _cache_msg_data_convert(msg).generations[0].text
answer = asyncio.run(chat.agenerate([question2]))
assert answer.generations[0][0].text == _cache_msg_data_convert(msg).generations[0].text
def test_last_content_without_template():
string_prompt = PromptTemplate.from_template("tell me a joke about {subject}")
template = string_prompt.template
cache_obj = Cache()
data_dir = str(random.random())
init_similar_cache(data_dir=data_dir, cache_obj=cache_obj, pre_func=last_content_without_template, config=Config(template=template))
subject_str = "animal"
expect_answer = "this is a joke"
with patch("openai.ChatCompletion.create") as mock_create:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "caf86f4uutaoxfysmf7anj01xl6sv3ps",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_create.return_value = datas
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": string_prompt.format(subject=subject_str)},
],
cache_obj=cache_obj,
)
assert get_message_from_openai_answer(response) == expect_answer, response
cache_obj.flush()
init_similar_cache(data_dir=data_dir, cache_obj=cache_obj)
cache_res = get(str([subject_str]), cache_obj=cache_obj)
print(str([subject_str]))
assert cache_res == expect_answer, cache_res
| true | import asyncio
import os
import random
from unittest.mock import patch
from gptcache import Cache, Config
from gptcache.adapter import openai
from gptcache.adapter.api import init_similar_cache, get
from gptcache.adapter.langchain_models import LangChainLLMs, LangChainChat, _cache_msg_data_convert
from gptcache.processor.pre import get_prompt, last_content_without_template, get_messages_last_content
from gptcache.utils import import_pydantic, import_langchain
from gptcache.utils.response import get_message_from_openai_answer
import_pydantic()
import_langchain()
from langchain import OpenAI, PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
def test_langchain_llms():
question = "test_langchain_llms"
expect_answer = "hello"
llm_cache = Cache()
llm_cache.init(
pre_embedding_func=get_prompt,
)
os.environ["OPENAI_API_KEY"] = "API"
langchain_openai = OpenAI(model_name="text-ada-001")
llm = LangChainLLMs(llm=langchain_openai,cache_obj=llm_cache)
assert str(langchain_openai) == str(llm)
with patch("openai.Completion.create") as mock_create:
mock_create.return_value = {
"choices": [
{
"finish_reason": "stop",
"index": 0,
"text": expect_answer,
}
],
"created": 1677825456,
"id": "PI:KEY:ax5kh6jaqkcd2tiexxs8v6xjo8yv8a6bEND_PI",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion",
"usage": {
"completion_tokens": 301,
"prompt_tokens": 36,
"total_tokens": 337
}
}
answer = llm(prompt=question)
assert expect_answer == answer
answer = llm(prompt=question)
assert expect_answer == answer
def test_langchain_chats():
question = [HumanMessage(content="test_langchain_chats")]
question2 = [HumanMessage(content="test_langchain_chats2")]
msg = "chat models"
expect_answer = {
"role": "assistant",
"message": msg,
"content": msg,
}
llm_cache = Cache()
llm_cache.init(
pre_embedding_func=get_messages_last_content,
)
os.environ["OPENAI_API_KEY"] = "API"
langchain_openai = ChatOpenAI(temperature=0)
chat = LangChainChat(chat=langchain_openai,cache_obj=llm_cache)
assert chat.get_num_tokens("hello") == langchain_openai.get_num_tokens("hello")
assert chat.get_num_tokens_from_messages(messages=[HumanMessage(content="test_langchain_chats")]) \
== langchain_openai.get_num_tokens_from_messages(messages=[HumanMessage(content="test_langchain_chats")])
with patch("openai.ChatCompletion.create") as mock_create:
mock_create.return_value = {
"choices": [
{
"finish_reason": "stop",
"index": 0,
"message": expect_answer,
}
],
"delta": {"role": "assistant"},
"created": 1677825456,
"id": "PI:KEY:ax5kh6jaqkcd2tiexxs8v6xjo8yv8a6bEND_PI",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion",
"usage": {
"completion_tokens": 301,
"prompt_tokens": 36,
"total_tokens": 337
}
}
answer = chat(messages=question)
assert answer == _cache_msg_data_convert(msg).generations[0].message
with patch("openai.ChatCompletion.acreate") as mock_create:
mock_create.return_value = {
"choices": [
{
"finish_reason": "stop",
"index": 0,
"message": expect_answer,
}
],
"delta": {"role": "assistant"},
"created": 1677825456,
"id": "PI:KEY:ax5kh6jaqkcd2tiexxs8v6xjo8yv8a6bEND_PI",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion",
"usage": {
"completion_tokens": 301,
"prompt_tokens": 36,
"total_tokens": 337
}
}
answer = asyncio.run(chat.agenerate([question2]))
assert answer.generations[0][0].text == _cache_msg_data_convert(msg).generations[0].text
answer = chat(messages=question)
assert answer == _cache_msg_data_convert(msg).generations[0].message
answer = asyncio.run(chat.agenerate([question]))
assert answer.generations[0][0].text == _cache_msg_data_convert(msg).generations[0].text
answer = asyncio.run(chat.agenerate([question2]))
assert answer.generations[0][0].text == _cache_msg_data_convert(msg).generations[0].text
def test_last_content_without_template():
string_prompt = PromptTemplate.from_template("tell me a joke about {subject}")
template = string_prompt.template
cache_obj = Cache()
data_dir = str(random.random())
init_similar_cache(data_dir=data_dir, cache_obj=cache_obj, pre_func=last_content_without_template, config=Config(template=template))
subject_str = "animal"
expect_answer = "this is a joke"
with patch("openai.ChatCompletion.create") as mock_create:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "PI:KEY:caf86f4uutaoxfysmf7anj01xl6sv3psEND_PI",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_create.return_value = datas
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": string_prompt.format(subject=subject_str)},
],
cache_obj=cache_obj,
)
assert get_message_from_openai_answer(response) == expect_answer, response
cache_obj.flush()
init_similar_cache(data_dir=data_dir, cache_obj=cache_obj)
cache_res = get(str([subject_str]), cache_obj=cache_obj)
print(str([subject_str]))
assert cache_res == expect_answer, cache_res
|
hf_public_repos/langchain-ai/langchain/docs/docs/integrations | hf_public_repos/langchain-ai/langchain/docs/docs/integrations/document_loaders/mastodon.ipynb | from langchain.document_loaders import MastodonTootsLoader#!pip install Mastodon.pyloader = MastodonTootsLoader(
mastodon_accounts=["@Gargron@mastodon.social"],
number_toots=50, # Default value is 100
)
# Or set up access information to use a Mastodon app.
# Note that the access token can either be passed into
# constructor or you can set the environment "MASTODON_ACCESS_TOKEN".
# loader = MastodonTootsLoader(
# access_token="<ACCESS TOKEN OF MASTODON APP>",
# api_base_url="<API BASE URL OF MASTODON APP INSTANCE>",
# mastodon_accounts=["@Gargron@mastodon.social"],
# number_toots=50, # Default value is 100
# )documents = loader.load()
for doc in documents[:3]:
print(doc.page_content)
print("=" * 80) | 0 | 3,916 | [{"tag": "EMAIL", "value": "Gargron@mastodon.social", "start": 138, "end": 161}, {"tag": "EMAIL", "value": "Gargron@mastodon.social", "start": 566, "end": 589}] | true | 2 | from langchain.document_loaders import MastodonTootsLoader#!pip install Mastodon.pyloader = MastodonTootsLoader(
mastodon_accounts=["@anpch@example.com"],
number_toots=50, # Default value is 100
)
# Or set up access information to use a Mastodon app.
# Note that the access token can either be passed into
# constructor or you can set the environment "MASTODON_ACCESS_TOKEN".
# loader = MastodonTootsLoader(
# access_token="<ACCESS TOKEN OF MASTODON APP>",
# api_base_url="<API BASE URL OF MASTODON APP INSTANCE>",
# mastodon_accounts=["@anpch@example.com"],
# number_toots=50, # Default value is 100
# )documents = loader.load()
for doc in documents[:3]:
print(doc.page_content)
print("=" * 80) | true | from langchain.document_loaders import MastodonTootsLoader#!pip install Mastodon.pyloader = MastodonTootsLoader(
mastodon_accounts=["@PI:EMAIL:anpch@example.comEND_PI"],
number_toots=50, # Default value is 100
)
# Or set up access information to use a Mastodon app.
# Note that the access token can either be passed into
# constructor or you can set the environment "MASTODON_ACCESS_TOKEN".
# loader = MastodonTootsLoader(
# access_token="<ACCESS TOKEN OF MASTODON APP>",
# api_base_url="<API BASE URL OF MASTODON APP INSTANCE>",
# mastodon_accounts=["@PI:EMAIL:anpch@example.comEND_PI"],
# number_toots=50, # Default value is 100
# )documents = loader.load()
for doc in documents[:3]:
print(doc.page_content)
print("=" * 80) |
hf_public_repos/zilliztech | hf_public_repos/zilliztech/GPTCache/setup.py | import codecs
import os
import re
from typing import List
import setuptools
from setuptools import find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open("README.md", "r") as fh:
long_description = fh.read()
def parse_requirements(file_name: str) -> List[str]:
with open(file_name) as f:
return [
require.strip() for require in f
if require.strip() and not require.startswith('#')
]
def read(*parts):
with codecs.open(os.path.join(here, *parts), "r") as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setuptools.setup(
name="gptcache",
packages=find_packages(),
version=find_version("gptcache", "__init__.py"),
author="SimFG",
author_email="bang.fu@zilliz.com",
description="GPTCache, a powerful caching library that can be used to speed up and lower the cost of chat "
"applications that rely on the LLM service. GPTCache works as a memcache for AIGC applications, "
"similar to how Redis works for traditional applications.",
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=parse_requirements('requirements.txt'),
url="https://github.com/zilliztech/GPTCache",
license='https://opensource.org/license/mit/',
python_requires='>=3.8.1',
entry_points={
'console_scripts': [
'gptcache_server=gptcache_server.server:main',
],
},
)
| 0 | 782 | [{"tag": "EMAIL", "value": "bang.fu@zilliz.com", "start": 1000, "end": 1018}] | true | 1 | import codecs
import os
import re
from typing import List
import setuptools
from setuptools import find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open("README.md", "r") as fh:
long_description = fh.read()
def parse_requirements(file_name: str) -> List[str]:
with open(file_name) as f:
return [
require.strip() for require in f
if require.strip() and not require.startswith('#')
]
def read(*parts):
with codecs.open(os.path.join(here, *parts), "r") as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setuptools.setup(
name="gptcache",
packages=find_packages(),
version=find_version("gptcache", "__init__.py"),
author="SimFG",
author_email="kenaa@example.com",
description="GPTCache, a powerful caching library that can be used to speed up and lower the cost of chat "
"applications that rely on the LLM service. GPTCache works as a memcache for AIGC applications, "
"similar to how Redis works for traditional applications.",
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=parse_requirements('requirements.txt'),
url="https://github.com/zilliztech/GPTCache",
license='https://opensource.org/license/mit/',
python_requires='>=3.8.1',
entry_points={
'console_scripts': [
'gptcache_server=gptcache_server.server:main',
],
},
)
| true | import codecs
import os
import re
from typing import List
import setuptools
from setuptools import find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open("README.md", "r") as fh:
long_description = fh.read()
def parse_requirements(file_name: str) -> List[str]:
with open(file_name) as f:
return [
require.strip() for require in f
if require.strip() and not require.startswith('#')
]
def read(*parts):
with codecs.open(os.path.join(here, *parts), "r") as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setuptools.setup(
name="gptcache",
packages=find_packages(),
version=find_version("gptcache", "__init__.py"),
author="SimFG",
author_email="PI:EMAIL:kenaa@example.comEND_PI",
description="GPTCache, a powerful caching library that can be used to speed up and lower the cost of chat "
"applications that rely on the LLM service. GPTCache works as a memcache for AIGC applications, "
"similar to how Redis works for traditional applications.",
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=parse_requirements('requirements.txt'),
url="https://github.com/zilliztech/GPTCache",
license='https://opensource.org/license/mit/',
python_requires='>=3.8.1',
entry_points={
'console_scripts': [
'gptcache_server=gptcache_server.server:main',
],
},
)
|
hf_public_repos/langchain-ai/langchain/libs/experimental/tests | hf_public_repos/langchain-ai/langchain/libs/experimental/tests/unit_tests/test_reversible_data_anonymizer.py | import os
from typing import Iterator, List
import pytest
from . import is_libcublas_available
@pytest.fixture(scope="module", autouse=True)
def check_spacy_model() -> Iterator[None]:
import spacy
if not spacy.util.is_package("en_core_web_lg"):
pytest.skip(reason="Spacy model 'en_core_web_lg' not installed")
yield
@pytest.fixture(scope="module", autouse=True)
def check_libcublas() -> Iterator[None]:
if not is_libcublas_available():
pytest.skip(reason="libcublas.so is not available")
yield
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
@pytest.mark.parametrize(
"analyzed_fields,should_contain",
[(["PERSON"], False), (["PHONE_NUMBER"], True), (None, False)],
)
def test_anonymize(analyzed_fields: List[str], should_contain: bool) -> None:
"""Test anonymizing a name in a simple sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = "Hello, my name is John Doe."
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=analyzed_fields)
anonymized_text = anonymizer.anonymize(text)
assert ("John Doe" in anonymized_text) == should_contain
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
@pytest.mark.parametrize(
"analyzed_fields,should_contain",
[(["PERSON"], True), (["PHONE_NUMBER"], True), (None, True)],
)
def test_anonymize_allow_list(analyzed_fields: List[str], should_contain: bool) -> None:
"""Test anonymizing a name in a simple sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = "Hello, my name is John Doe."
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=analyzed_fields)
anonymized_text = anonymizer.anonymize(text, allow_list=["John Doe"])
assert ("John Doe" in anonymized_text) == should_contain
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_anonymize_multiple() -> None:
"""Test anonymizing multiple items in a sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = "John Smith's phone number is 313-666-7440 and email is johnsmith@gmail.com"
anonymizer = PresidioReversibleAnonymizer()
anonymized_text = anonymizer.anonymize(text)
for phrase in ["John Smith", "313-666-7440", "johnsmith@gmail.com"]:
assert phrase not in anonymized_text
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_check_instances() -> None:
"""Test anonymizing multiple items in a sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = (
"This is John Smith. John Smith works in a bakery." "John Smith is a good guy"
)
anonymizer = PresidioReversibleAnonymizer(["PERSON"], faker_seed=42)
anonymized_text = anonymizer.anonymize(text)
persons = list(anonymizer.deanonymizer_mapping["PERSON"].keys())
assert len(persons) == 1
anonymized_name = persons[0]
assert anonymized_text.count(anonymized_name) == 3
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text.count(anonymized_name) == 3
assert anonymizer.deanonymizer_mapping["PERSON"][anonymized_name] == "John Smith"
text = "This is Jane Smith"
anonymized_text = anonymizer.anonymize(text)
persons = list(anonymizer.deanonymizer_mapping["PERSON"].keys())
assert len(persons) == 2
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_anonymize_with_custom_operator() -> None:
"""Test anonymize a name with a custom operator"""
from presidio_anonymizer.entities import OperatorConfig
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
custom_operator = {"PERSON": OperatorConfig("replace", {"new_value": "NAME"})}
anonymizer = PresidioReversibleAnonymizer(operators=custom_operator)
text = "Jane Doe was here."
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "NAME was here."
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_add_recognizer_operator() -> None:
"""
Test add recognizer and anonymize a new type of entity and with a custom operator
"""
from presidio_analyzer import PatternRecognizer
from presidio_anonymizer.entities import OperatorConfig
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=[])
titles_list = ["Sir", "Madam", "Professor"]
custom_recognizer = PatternRecognizer(
supported_entity="TITLE", deny_list=titles_list
)
anonymizer.add_recognizer(custom_recognizer)
# anonymizing with custom recognizer
text = "Madam Jane Doe was here."
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "<TITLE> Jane Doe was here."
# anonymizing with custom recognizer and operator
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=[])
anonymizer.add_recognizer(custom_recognizer)
custom_operator = {"TITLE": OperatorConfig("replace", {"new_value": "Dear"})}
anonymizer.add_operators(custom_operator)
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "Dear Jane Doe was here."
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_deanonymizer_mapping() -> None:
"""Test if deanonymizer mapping is correctly populated"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(
analyzed_fields=["PERSON", "PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD"]
)
anonymizer.anonymize("Hello, my name is John Doe and my number is 444 555 6666.")
# ["PERSON", "PHONE_NUMBER"]
assert len(anonymizer.deanonymizer_mapping.keys()) == 2
assert "John Doe" in anonymizer.deanonymizer_mapping.get("PERSON", {}).values()
assert (
"444 555 6666"
in anonymizer.deanonymizer_mapping.get("PHONE_NUMBER", {}).values()
)
text_to_anonymize = (
"And my name is Jane Doe, my email is jane@gmail.com and "
"my credit card is 4929 5319 6292 5362."
)
anonymizer.anonymize(text_to_anonymize)
# ["PERSON", "PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD"]
assert len(anonymizer.deanonymizer_mapping.keys()) == 4
assert "Jane Doe" in anonymizer.deanonymizer_mapping.get("PERSON", {}).values()
assert (
"jane@gmail.com"
in anonymizer.deanonymizer_mapping.get("EMAIL_ADDRESS", {}).values()
)
assert (
"4929 5319 6292 5362"
in anonymizer.deanonymizer_mapping.get("CREDIT_CARD", {}).values()
)
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_deanonymize() -> None:
"""Test deanonymizing a name in a simple sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = "Hello, my name is John Doe."
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=["PERSON"])
anonymized_text = anonymizer.anonymize(text)
deanonymized_text = anonymizer.deanonymize(anonymized_text)
assert deanonymized_text == text
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_save_load_deanonymizer_mapping() -> None:
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=["PERSON"])
anonymizer.anonymize("Hello, my name is John Doe.")
try:
anonymizer.save_deanonymizer_mapping("test_file.json")
assert os.path.isfile("test_file.json")
anonymizer = PresidioReversibleAnonymizer()
anonymizer.load_deanonymizer_mapping("test_file.json")
assert "John Doe" in anonymizer.deanonymizer_mapping.get("PERSON", {}).values()
finally:
os.remove("test_file.json")
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_non_faker_values() -> None:
"""Test anonymizing multiple items in a sentence without faker values"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = (
"My name is John Smith. Your name is Adam Smith. Her name is Jane Smith."
"Our names are: John Smith, Adam Smith, Jane Smith."
)
expected_result = (
"My name is <PERSON>. Your name is <PERSON_2>. Her name is <PERSON_3>."
"Our names are: <PERSON>, <PERSON_2>, <PERSON_3>."
)
anonymizer = PresidioReversibleAnonymizer(add_default_faker_operators=False)
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == expected_result
| 0 | 1,319 | [{"tag": "EMAIL", "value": "johnsmith@gmail.com", "start": 2213, "end": 2232}, {"tag": "EMAIL", "value": "johnsmith@gmail.com", "start": 2381, "end": 2400}, {"tag": "EMAIL", "value": "jane@gmail.com", "start": 6241, "end": 6255}, {"tag": "EMAIL", "value": "jane@gmail.com", "start": 6593, "end": 6607}] | true | 4 | import os
from typing import Iterator, List
import pytest
from . import is_libcublas_available
@pytest.fixture(scope="module", autouse=True)
def check_spacy_model() -> Iterator[None]:
import spacy
if not spacy.util.is_package("en_core_web_lg"):
pytest.skip(reason="Spacy model 'en_core_web_lg' not installed")
yield
@pytest.fixture(scope="module", autouse=True)
def check_libcublas() -> Iterator[None]:
if not is_libcublas_available():
pytest.skip(reason="libcublas.so is not available")
yield
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
@pytest.mark.parametrize(
"analyzed_fields,should_contain",
[(["PERSON"], False), (["PHONE_NUMBER"], True), (None, False)],
)
def test_anonymize(analyzed_fields: List[str], should_contain: bool) -> None:
"""Test anonymizing a name in a simple sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = "Hello, my name is John Doe."
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=analyzed_fields)
anonymized_text = anonymizer.anonymize(text)
assert ("John Doe" in anonymized_text) == should_contain
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
@pytest.mark.parametrize(
"analyzed_fields,should_contain",
[(["PERSON"], True), (["PHONE_NUMBER"], True), (None, True)],
)
def test_anonymize_allow_list(analyzed_fields: List[str], should_contain: bool) -> None:
"""Test anonymizing a name in a simple sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = "Hello, my name is John Doe."
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=analyzed_fields)
anonymized_text = anonymizer.anonymize(text, allow_list=["John Doe"])
assert ("John Doe" in anonymized_text) == should_contain
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_anonymize_multiple() -> None:
"""Test anonymizing multiple items in a sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = "John Smith's phone number is 313-666-7440 and email is lyhxr@example.com"
anonymizer = PresidioReversibleAnonymizer()
anonymized_text = anonymizer.anonymize(text)
for phrase in ["John Smith", "313-666-7440", "lyhxr@example.com"]:
assert phrase not in anonymized_text
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_check_instances() -> None:
"""Test anonymizing multiple items in a sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = (
"This is John Smith. John Smith works in a bakery." "John Smith is a good guy"
)
anonymizer = PresidioReversibleAnonymizer(["PERSON"], faker_seed=42)
anonymized_text = anonymizer.anonymize(text)
persons = list(anonymizer.deanonymizer_mapping["PERSON"].keys())
assert len(persons) == 1
anonymized_name = persons[0]
assert anonymized_text.count(anonymized_name) == 3
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text.count(anonymized_name) == 3
assert anonymizer.deanonymizer_mapping["PERSON"][anonymized_name] == "John Smith"
text = "This is Jane Smith"
anonymized_text = anonymizer.anonymize(text)
persons = list(anonymizer.deanonymizer_mapping["PERSON"].keys())
assert len(persons) == 2
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_anonymize_with_custom_operator() -> None:
"""Test anonymize a name with a custom operator"""
from presidio_anonymizer.entities import OperatorConfig
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
custom_operator = {"PERSON": OperatorConfig("replace", {"new_value": "NAME"})}
anonymizer = PresidioReversibleAnonymizer(operators=custom_operator)
text = "Jane Doe was here."
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "NAME was here."
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_add_recognizer_operator() -> None:
"""
Test add recognizer and anonymize a new type of entity and with a custom operator
"""
from presidio_analyzer import PatternRecognizer
from presidio_anonymizer.entities import OperatorConfig
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=[])
titles_list = ["Sir", "Madam", "Professor"]
custom_recognizer = PatternRecognizer(
supported_entity="TITLE", deny_list=titles_list
)
anonymizer.add_recognizer(custom_recognizer)
# anonymizing with custom recognizer
text = "Madam Jane Doe was here."
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "<TITLE> Jane Doe was here."
# anonymizing with custom recognizer and operator
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=[])
anonymizer.add_recognizer(custom_recognizer)
custom_operator = {"TITLE": OperatorConfig("replace", {"new_value": "Dear"})}
anonymizer.add_operators(custom_operator)
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "Dear Jane Doe was here."
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_deanonymizer_mapping() -> None:
"""Test if deanonymizer mapping is correctly populated"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(
analyzed_fields=["PERSON", "PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD"]
)
anonymizer.anonymize("Hello, my name is John Doe and my number is 444 555 6666.")
# ["PERSON", "PHONE_NUMBER"]
assert len(anonymizer.deanonymizer_mapping.keys()) == 2
assert "John Doe" in anonymizer.deanonymizer_mapping.get("PERSON", {}).values()
assert (
"444 555 6666"
in anonymizer.deanonymizer_mapping.get("PHONE_NUMBER", {}).values()
)
text_to_anonymize = (
"And my name is Jane Doe, my email is upchh@example.com and "
"my credit card is 4929 5319 6292 5362."
)
anonymizer.anonymize(text_to_anonymize)
# ["PERSON", "PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD"]
assert len(anonymizer.deanonymizer_mapping.keys()) == 4
assert "Jane Doe" in anonymizer.deanonymizer_mapping.get("PERSON", {}).values()
assert (
"upchh@example.com"
in anonymizer.deanonymizer_mapping.get("EMAIL_ADDRESS", {}).values()
)
assert (
"4929 5319 6292 5362"
in anonymizer.deanonymizer_mapping.get("CREDIT_CARD", {}).values()
)
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_deanonymize() -> None:
"""Test deanonymizing a name in a simple sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = "Hello, my name is John Doe."
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=["PERSON"])
anonymized_text = anonymizer.anonymize(text)
deanonymized_text = anonymizer.deanonymize(anonymized_text)
assert deanonymized_text == text
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_save_load_deanonymizer_mapping() -> None:
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=["PERSON"])
anonymizer.anonymize("Hello, my name is John Doe.")
try:
anonymizer.save_deanonymizer_mapping("test_file.json")
assert os.path.isfile("test_file.json")
anonymizer = PresidioReversibleAnonymizer()
anonymizer.load_deanonymizer_mapping("test_file.json")
assert "John Doe" in anonymizer.deanonymizer_mapping.get("PERSON", {}).values()
finally:
os.remove("test_file.json")
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_non_faker_values() -> None:
"""Test anonymizing multiple items in a sentence without faker values"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = (
"My name is John Smith. Your name is Adam Smith. Her name is Jane Smith."
"Our names are: John Smith, Adam Smith, Jane Smith."
)
expected_result = (
"My name is <PERSON>. Your name is <PERSON_2>. Her name is <PERSON_3>."
"Our names are: <PERSON>, <PERSON_2>, <PERSON_3>."
)
anonymizer = PresidioReversibleAnonymizer(add_default_faker_operators=False)
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == expected_result
| true | import os
from typing import Iterator, List
import pytest
from . import is_libcublas_available
@pytest.fixture(scope="module", autouse=True)
def check_spacy_model() -> Iterator[None]:
import spacy
if not spacy.util.is_package("en_core_web_lg"):
pytest.skip(reason="Spacy model 'en_core_web_lg' not installed")
yield
@pytest.fixture(scope="module", autouse=True)
def check_libcublas() -> Iterator[None]:
if not is_libcublas_available():
pytest.skip(reason="libcublas.so is not available")
yield
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
@pytest.mark.parametrize(
"analyzed_fields,should_contain",
[(["PERSON"], False), (["PHONE_NUMBER"], True), (None, False)],
)
def test_anonymize(analyzed_fields: List[str], should_contain: bool) -> None:
"""Test anonymizing a name in a simple sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = "Hello, my name is John Doe."
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=analyzed_fields)
anonymized_text = anonymizer.anonymize(text)
assert ("John Doe" in anonymized_text) == should_contain
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
@pytest.mark.parametrize(
"analyzed_fields,should_contain",
[(["PERSON"], True), (["PHONE_NUMBER"], True), (None, True)],
)
def test_anonymize_allow_list(analyzed_fields: List[str], should_contain: bool) -> None:
"""Test anonymizing a name in a simple sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = "Hello, my name is John Doe."
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=analyzed_fields)
anonymized_text = anonymizer.anonymize(text, allow_list=["John Doe"])
assert ("John Doe" in anonymized_text) == should_contain
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_anonymize_multiple() -> None:
"""Test anonymizing multiple items in a sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = "John Smith's phone number is 313-666-7440 and email is PI:EMAIL:lyhxr@example.comEND_PI"
anonymizer = PresidioReversibleAnonymizer()
anonymized_text = anonymizer.anonymize(text)
for phrase in ["John Smith", "313-666-7440", "PI:EMAIL:lyhxr@example.comEND_PI"]:
assert phrase not in anonymized_text
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_check_instances() -> None:
"""Test anonymizing multiple items in a sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = (
"This is John Smith. John Smith works in a bakery." "John Smith is a good guy"
)
anonymizer = PresidioReversibleAnonymizer(["PERSON"], faker_seed=42)
anonymized_text = anonymizer.anonymize(text)
persons = list(anonymizer.deanonymizer_mapping["PERSON"].keys())
assert len(persons) == 1
anonymized_name = persons[0]
assert anonymized_text.count(anonymized_name) == 3
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text.count(anonymized_name) == 3
assert anonymizer.deanonymizer_mapping["PERSON"][anonymized_name] == "John Smith"
text = "This is Jane Smith"
anonymized_text = anonymizer.anonymize(text)
persons = list(anonymizer.deanonymizer_mapping["PERSON"].keys())
assert len(persons) == 2
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_anonymize_with_custom_operator() -> None:
"""Test anonymize a name with a custom operator"""
from presidio_anonymizer.entities import OperatorConfig
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
custom_operator = {"PERSON": OperatorConfig("replace", {"new_value": "NAME"})}
anonymizer = PresidioReversibleAnonymizer(operators=custom_operator)
text = "Jane Doe was here."
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "NAME was here."
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_add_recognizer_operator() -> None:
"""
Test add recognizer and anonymize a new type of entity and with a custom operator
"""
from presidio_analyzer import PatternRecognizer
from presidio_anonymizer.entities import OperatorConfig
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=[])
titles_list = ["Sir", "Madam", "Professor"]
custom_recognizer = PatternRecognizer(
supported_entity="TITLE", deny_list=titles_list
)
anonymizer.add_recognizer(custom_recognizer)
# anonymizing with custom recognizer
text = "Madam Jane Doe was here."
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "<TITLE> Jane Doe was here."
# anonymizing with custom recognizer and operator
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=[])
anonymizer.add_recognizer(custom_recognizer)
custom_operator = {"TITLE": OperatorConfig("replace", {"new_value": "Dear"})}
anonymizer.add_operators(custom_operator)
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "Dear Jane Doe was here."
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_deanonymizer_mapping() -> None:
"""Test if deanonymizer mapping is correctly populated"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(
analyzed_fields=["PERSON", "PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD"]
)
anonymizer.anonymize("Hello, my name is John Doe and my number is 444 555 6666.")
# ["PERSON", "PHONE_NUMBER"]
assert len(anonymizer.deanonymizer_mapping.keys()) == 2
assert "John Doe" in anonymizer.deanonymizer_mapping.get("PERSON", {}).values()
assert (
"444 555 6666"
in anonymizer.deanonymizer_mapping.get("PHONE_NUMBER", {}).values()
)
text_to_anonymize = (
"And my name is Jane Doe, my email is PI:EMAIL:upchh@example.comEND_PI and "
"my credit card is 4929 5319 6292 5362."
)
anonymizer.anonymize(text_to_anonymize)
# ["PERSON", "PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD"]
assert len(anonymizer.deanonymizer_mapping.keys()) == 4
assert "Jane Doe" in anonymizer.deanonymizer_mapping.get("PERSON", {}).values()
assert (
"PI:EMAIL:upchh@example.comEND_PI"
in anonymizer.deanonymizer_mapping.get("EMAIL_ADDRESS", {}).values()
)
assert (
"4929 5319 6292 5362"
in anonymizer.deanonymizer_mapping.get("CREDIT_CARD", {}).values()
)
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_deanonymize() -> None:
"""Test deanonymizing a name in a simple sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = "Hello, my name is John Doe."
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=["PERSON"])
anonymized_text = anonymizer.anonymize(text)
deanonymized_text = anonymizer.deanonymize(anonymized_text)
assert deanonymized_text == text
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_save_load_deanonymizer_mapping() -> None:
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=["PERSON"])
anonymizer.anonymize("Hello, my name is John Doe.")
try:
anonymizer.save_deanonymizer_mapping("test_file.json")
assert os.path.isfile("test_file.json")
anonymizer = PresidioReversibleAnonymizer()
anonymizer.load_deanonymizer_mapping("test_file.json")
assert "John Doe" in anonymizer.deanonymizer_mapping.get("PERSON", {}).values()
finally:
os.remove("test_file.json")
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_non_faker_values() -> None:
"""Test anonymizing multiple items in a sentence without faker values"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = (
"My name is John Smith. Your name is Adam Smith. Her name is Jane Smith."
"Our names are: John Smith, Adam Smith, Jane Smith."
)
expected_result = (
"My name is <PERSON>. Your name is <PERSON_2>. Her name is <PERSON_3>."
"Our names are: <PERSON>, <PERSON_2>, <PERSON_3>."
)
anonymizer = PresidioReversibleAnonymizer(add_default_faker_operators=False)
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == expected_result
|
hf_public_repos/langchain-ai/langchain/docs/docs/integrations | hf_public_repos/langchain-ai/langchain/docs/docs/integrations/vectorstores/atlas.ipynb | import time
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import SpacyTextSplitter
from langchain.vectorstores import AtlasDB
from langchain.document_loaders import TextLoaderATLAS_TEST_API_KEY = "7xDPkYXSYDc1_ErdTPIcoAR9RNd8YDlkS3nVNXcVoIMZ6"loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = SpacyTextSplitter(separator="|")
texts = []
for doc in text_splitter.split_documents(documents):
texts.extend(doc.page_content.split("|"))
texts = [e.strip() for e in texts]db = AtlasDB.from_texts(
texts=texts,
name="test_index_" + str(time.time()), # unique name for your vector store
description="test_index", # a description for your vector store
api_key=ATLAS_TEST_API_KEY,
index_kwargs={"build_topic_model": True},
)db.project.wait_for_project_lock()db.project | 0 | 3,538 | [{"tag": "KEY", "value": "7xDPkYXSYDc1_ErdTPIcoAR9RNd8YDlkS3nVNXcVoIMZ6", "start": 237, "end": 282}] | true | 1 | import time
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import SpacyTextSplitter
from langchain.vectorstores import AtlasDB
from langchain.document_loaders import TextLoaderATLAS_TEST_API_KEY = "ngw6fo1pu3tjgnp9jnlp7vnwvfqb9yn7"loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = SpacyTextSplitter(separator="|")
texts = []
for doc in text_splitter.split_documents(documents):
texts.extend(doc.page_content.split("|"))
texts = [e.strip() for e in texts]db = AtlasDB.from_texts(
texts=texts,
name="test_index_" + str(time.time()), # unique name for your vector store
description="test_index", # a description for your vector store
api_key=ATLAS_TEST_API_KEY,
index_kwargs={"build_topic_model": True},
)db.project.wait_for_project_lock()db.project | true | import time
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import SpacyTextSplitter
from langchain.vectorstores import AtlasDB
from langchain.document_loaders import TextLoaderATLAS_TEST_API_KEY = "PI:KEY:ngw6fo1pu3tjgnp9jnlp7vnwvfqb9yn7END_PI"loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = SpacyTextSplitter(separator="|")
texts = []
for doc in text_splitter.split_documents(documents):
texts.extend(doc.page_content.split("|"))
texts = [e.strip() for e in texts]db = AtlasDB.from_texts(
texts=texts,
name="test_index_" + str(time.time()), # unique name for your vector store
description="test_index", # a description for your vector store
api_key=ATLAS_TEST_API_KEY,
index_kwargs={"build_topic_model": True},
)db.project.wait_for_project_lock()db.project |
hf_public_repos/langchain-ai/langchain/docs/docs/guides | hf_public_repos/langchain-ai/langchain/docs/docs/guides/safety/amazon_comprehend_chain.ipynb | %pip install boto3 nltk%pip install -U langchain_experimental%pip install -U langchain pydanticimport boto3
import os
comprehend_client = boto3.client("comprehend", region_name="us-east-1")from langchain_experimental.comprehend_moderation import AmazonComprehendModerationChain
comprehend_moderation = AmazonComprehendModerationChain(
client=comprehend_client, verbose=True # optional
)from langchain.prompts import PromptTemplate
from langchain.llms.fake import FakeListLLM
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationPiiError,
)
template = """Question: {question}
Answer:"""
prompt = PromptTemplate(template=template, input_variables=["question"])
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
# replace with your own expletive
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
chain = (
prompt
| comprehend_moderation
| {"input": (lambda x: x["output"]) | llm}
| comprehend_moderation
)
try:
response = chain.invoke(
{
"question": "A sample SSN number looks like this 123-22-3345. Can you give me some more samples?"
}
)
except ModerationPiiError as e:
print(str(e))
else:
print(response["output"])from langchain_experimental.comprehend_moderation import (
BaseModerationConfig,
ModerationPromptSafetyConfig,
ModerationPiiConfig,
ModerationToxicityConfig,
)
pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X")
toxicity_config = ModerationToxicityConfig(threshold=0.5)
prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.5)
moderation_config = BaseModerationConfig(
filters=[pii_config, toxicity_config, prompt_safety_config]
)comp_moderation_with_config = AmazonComprehendModerationChain(
moderation_config=moderation_config, # specify the configuration
client=comprehend_client, # optionally pass the Boto3 Client
verbose=True,
)from langchain.prompts import PromptTemplate
from langchain.llms.fake import FakeListLLM
template = """Question: {question}
Answer:"""
prompt = PromptTemplate(template=template, input_variables=["question"])
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
# replace with your own expletive
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
chain = (
prompt
| comp_moderation_with_config
| {"input": (lambda x: x["output"]) | llm}
| comp_moderation_with_config
)
try:
response = chain.invoke(
{
"question": "A sample SSN number looks like this 123-45-7890. Can you give me some more samples?"
}
)
except Exception as e:
print(str(e))
else:
print(response["output"])from langchain_experimental.comprehend_moderation import BaseModerationCallbackHandler# Define callback handlers by subclassing BaseModerationCallbackHandler
class MyModCallback(BaseModerationCallbackHandler):
async def on_after_pii(self, output_beacon, unique_id):
import json
moderation_type = output_beacon["moderation_type"]
chain_id = output_beacon["moderation_chain_id"]
with open(f"output-{moderation_type}-{chain_id}.json", "w") as file:
data = {"beacon_data": output_beacon, "unique_id": unique_id}
json.dump(data, file)
"""
async def on_after_toxicity(self, output_beacon, unique_id):
pass
async def on_after_prompt_safety(self, output_beacon, unique_id):
pass
"""
my_callback = MyModCallback()pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X")
toxicity_config = ModerationToxicityConfig(threshold=0.5)
moderation_config = BaseModerationConfig(filters=[pii_config, toxicity_config])
comp_moderation_with_config = AmazonComprehendModerationChain(
moderation_config=moderation_config, # specify the configuration
client=comprehend_client, # optionally pass the Boto3 Client
unique_id="john.doe@email.com", # A unique ID
moderation_callback=my_callback, # BaseModerationCallbackHandler
verbose=True,
)from langchain.prompts import PromptTemplate
from langchain.llms.fake import FakeListLLM
template = """Question: {question}
Answer:"""
prompt = PromptTemplate(template=template, input_variables=["question"])
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
# replace with your own expletive
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
chain = (
prompt
| comp_moderation_with_config
| {"input": (lambda x: x["output"]) | llm}
| comp_moderation_with_config
)
try:
response = chain.invoke(
{
"question": "A sample SSN number looks like this 123-456-7890. Can you give me some more samples?"
}
)
except Exception as e:
print(str(e))
else:
print(response["output"])%pip install huggingface_hubimport os
os.environ["HUGGINGFACEHUB_API_TOKEN"] = "<YOUR HF TOKEN HERE>"# See https://huggingface.co/models?pipeline_tag=text-generation&sort=downloads for some other options
repo_id = "google/flan-t5-xxl"from langchain.llms import HuggingFaceHub
from langchain.prompts import PromptTemplate
template = """{question}"""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm = HuggingFaceHub(
repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 256}
)# define filter configs
pii_config = ModerationPiiConfig(
labels=["SSN", "CREDIT_DEBIT_NUMBER"], redact=True, mask_character="X"
)
toxicity_config = ModerationToxicityConfig(threshold=0.5)
prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.8)
# define different moderation configs using the filter configs above
moderation_config_1 = BaseModerationConfig(
filters=[pii_config, toxicity_config, prompt_safety_config]
)
moderation_config_2 = BaseModerationConfig(filters=[pii_config])
# input prompt moderation chain with callback
amazon_comp_moderation = AmazonComprehendModerationChain(
moderation_config=moderation_config_1,
client=comprehend_client,
moderation_callback=my_callback,
verbose=True,
)
# Output from LLM moderation chain without callback
amazon_comp_moderation_out = AmazonComprehendModerationChain(
moderation_config=moderation_config_2, client=comprehend_client, verbose=True
)chain = (
prompt
| amazon_comp_moderation
| {"input": (lambda x: x["output"]) | llm}
| amazon_comp_moderation_out
)
try:
response = chain.invoke(
{
"question": """What is John Doe's address, phone number and SSN from the following text?
John Doe, a resident of 1234 Elm Street in Springfield, recently celebrated his birthday on January 1st. Turning 43 this year, John reflected on the years gone by. He often shares memories of his younger days with his close friends through calls on his phone, (555) 123-4567. Meanwhile, during a casual evening, he received an email at johndoe@example.com reminding him of an old acquaintance's reunion. As he navigated through some old documents, he stumbled upon a paper that listed his SSN as 123-45-6789, reminding him to store it in a safer place.
"""
}
)
except Exception as e:
print(str(e))
else:
print(response["output"])endpoint_name = "<SAGEMAKER_ENDPOINT_NAME>" # replace with your SageMaker Endpoint name
region = "<REGION>" # replace with your SageMaker Endpoint regionfrom langchain.llms import SagemakerEndpoint
from langchain.llms.sagemaker_endpoint import LLMContentHandler
from langchain.prompts import PromptTemplate
import json
class ContentHandler(LLMContentHandler):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: dict) -> bytes:
input_str = json.dumps({"text_inputs": prompt, **model_kwargs})
return input_str.encode("utf-8")
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
return response_json["generated_texts"][0]
content_handler = ContentHandler()
template = """From the following 'Document', precisely answer the 'Question'. Do not add any spurious information in your answer.
Document: John Doe, a resident of 1234 Elm Street in Springfield, recently celebrated his birthday on January 1st. Turning 43 this year, John reflected on the years gone by. He often shares memories of his younger days with his close friends through calls on his phone, (555) 123-4567. Meanwhile, during a casual evening, he received an email at johndoe@example.com reminding him of an old acquaintance's reunion. As he navigated through some old documents, he stumbled upon a paper that listed his SSN as 123-45-6789, reminding him to store it in a safer place.
Question: {question}
Answer:
"""
# prompt template for input text
llm_prompt = PromptTemplate(template=template, input_variables=["question"])
llm = SagemakerEndpoint(
endpoint_name=endpoint_name,
region_name=region,
model_kwargs={
"temperature": 0.95,
"max_length": 200,
"num_return_sequences": 3,
"top_k": 50,
"top_p": 0.95,
"do_sample": True,
},
content_handler=content_handler,
)# define filter configs
pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X")
toxicity_config = ModerationToxicityConfig(threshold=0.5)
# define different moderation configs using the filter configs above
moderation_config_1 = BaseModerationConfig(filters=[pii_config, toxicity_config])
moderation_config_2 = BaseModerationConfig(filters=[pii_config])
# input prompt moderation chain with callback
amazon_comp_moderation = AmazonComprehendModerationChain(
moderation_config=moderation_config_1,
client=comprehend_client,
moderation_callback=my_callback,
verbose=True,
)
# Output from LLM moderation chain without callback
amazon_comp_moderation_out = AmazonComprehendModerationChain(
moderation_config=moderation_config_2, client=comprehend_client, verbose=True
)chain = (
prompt
| amazon_comp_moderation
| {"input": (lambda x: x["output"]) | llm}
| amazon_comp_moderation_out
)
try:
response = chain.invoke(
{"question": "What is John Doe's address, phone number and SSN?"}
)
except Exception as e:
print(str(e))
else:
print(response["output"]) | 0 | 4,006 | [{"tag": "EMAIL", "value": "john.doe@email.com", "start": 4529, "end": 4547}, {"tag": "EMAIL", "value": "johndoe@example.com", "start": 7770, "end": 7789}, {"tag": "EMAIL", "value": "johndoe@example.com", "start": 9392, "end": 9411}] | true | 3 | %pip install boto3 nltk%pip install -U langchain_experimental%pip install -U langchain pydanticimport boto3
import os
comprehend_client = boto3.client("comprehend", region_name="us-east-1")from langchain_experimental.comprehend_moderation import AmazonComprehendModerationChain
comprehend_moderation = AmazonComprehendModerationChain(
client=comprehend_client, verbose=True # optional
)from langchain.prompts import PromptTemplate
from langchain.llms.fake import FakeListLLM
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationPiiError,
)
template = """Question: {question}
Answer:"""
prompt = PromptTemplate(template=template, input_variables=["question"])
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
# replace with your own expletive
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
chain = (
prompt
| comprehend_moderation
| {"input": (lambda x: x["output"]) | llm}
| comprehend_moderation
)
try:
response = chain.invoke(
{
"question": "A sample SSN number looks like this 123-22-3345. Can you give me some more samples?"
}
)
except ModerationPiiError as e:
print(str(e))
else:
print(response["output"])from langchain_experimental.comprehend_moderation import (
BaseModerationConfig,
ModerationPromptSafetyConfig,
ModerationPiiConfig,
ModerationToxicityConfig,
)
pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X")
toxicity_config = ModerationToxicityConfig(threshold=0.5)
prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.5)
moderation_config = BaseModerationConfig(
filters=[pii_config, toxicity_config, prompt_safety_config]
)comp_moderation_with_config = AmazonComprehendModerationChain(
moderation_config=moderation_config, # specify the configuration
client=comprehend_client, # optionally pass the Boto3 Client
verbose=True,
)from langchain.prompts import PromptTemplate
from langchain.llms.fake import FakeListLLM
template = """Question: {question}
Answer:"""
prompt = PromptTemplate(template=template, input_variables=["question"])
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
# replace with your own expletive
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
chain = (
prompt
| comp_moderation_with_config
| {"input": (lambda x: x["output"]) | llm}
| comp_moderation_with_config
)
try:
response = chain.invoke(
{
"question": "A sample SSN number looks like this 123-45-7890. Can you give me some more samples?"
}
)
except Exception as e:
print(str(e))
else:
print(response["output"])from langchain_experimental.comprehend_moderation import BaseModerationCallbackHandler# Define callback handlers by subclassing BaseModerationCallbackHandler
class MyModCallback(BaseModerationCallbackHandler):
async def on_after_pii(self, output_beacon, unique_id):
import json
moderation_type = output_beacon["moderation_type"]
chain_id = output_beacon["moderation_chain_id"]
with open(f"output-{moderation_type}-{chain_id}.json", "w") as file:
data = {"beacon_data": output_beacon, "unique_id": unique_id}
json.dump(data, file)
"""
async def on_after_toxicity(self, output_beacon, unique_id):
pass
async def on_after_prompt_safety(self, output_beacon, unique_id):
pass
"""
my_callback = MyModCallback()pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X")
toxicity_config = ModerationToxicityConfig(threshold=0.5)
moderation_config = BaseModerationConfig(filters=[pii_config, toxicity_config])
comp_moderation_with_config = AmazonComprehendModerationChain(
moderation_config=moderation_config, # specify the configuration
client=comprehend_client, # optionally pass the Boto3 Client
unique_id="kenaa@example.com", # A unique ID
moderation_callback=my_callback, # BaseModerationCallbackHandler
verbose=True,
)from langchain.prompts import PromptTemplate
from langchain.llms.fake import FakeListLLM
template = """Question: {question}
Answer:"""
prompt = PromptTemplate(template=template, input_variables=["question"])
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
# replace with your own expletive
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
chain = (
prompt
| comp_moderation_with_config
| {"input": (lambda x: x["output"]) | llm}
| comp_moderation_with_config
)
try:
response = chain.invoke(
{
"question": "A sample SSN number looks like this 123-456-7890. Can you give me some more samples?"
}
)
except Exception as e:
print(str(e))
else:
print(response["output"])%pip install huggingface_hubimport os
os.environ["HUGGINGFACEHUB_API_TOKEN"] = "<YOUR HF TOKEN HERE>"# See https://huggingface.co/models?pipeline_tag=text-generation&sort=downloads for some other options
repo_id = "google/flan-t5-xxl"from langchain.llms import HuggingFaceHub
from langchain.prompts import PromptTemplate
template = """{question}"""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm = HuggingFaceHub(
repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 256}
)# define filter configs
pii_config = ModerationPiiConfig(
labels=["SSN", "CREDIT_DEBIT_NUMBER"], redact=True, mask_character="X"
)
toxicity_config = ModerationToxicityConfig(threshold=0.5)
prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.8)
# define different moderation configs using the filter configs above
moderation_config_1 = BaseModerationConfig(
filters=[pii_config, toxicity_config, prompt_safety_config]
)
moderation_config_2 = BaseModerationConfig(filters=[pii_config])
# input prompt moderation chain with callback
amazon_comp_moderation = AmazonComprehendModerationChain(
moderation_config=moderation_config_1,
client=comprehend_client,
moderation_callback=my_callback,
verbose=True,
)
# Output from LLM moderation chain without callback
amazon_comp_moderation_out = AmazonComprehendModerationChain(
moderation_config=moderation_config_2, client=comprehend_client, verbose=True
)chain = (
prompt
| amazon_comp_moderation
| {"input": (lambda x: x["output"]) | llm}
| amazon_comp_moderation_out
)
try:
response = chain.invoke(
{
"question": """What is John Doe's address, phone number and SSN from the following text?
John Doe, a resident of 1234 Elm Street in Springfield, recently celebrated his birthday on January 1st. Turning 43 this year, John reflected on the years gone by. He often shares memories of his younger days with his close friends through calls on his phone, (555) 123-4567. Meanwhile, during a casual evening, he received an email at hzdkv@example.com reminding him of an old acquaintance's reunion. As he navigated through some old documents, he stumbled upon a paper that listed his SSN as 123-45-6789, reminding him to store it in a safer place.
"""
}
)
except Exception as e:
print(str(e))
else:
print(response["output"])endpoint_name = "<SAGEMAKER_ENDPOINT_NAME>" # replace with your SageMaker Endpoint name
region = "<REGION>" # replace with your SageMaker Endpoint regionfrom langchain.llms import SagemakerEndpoint
from langchain.llms.sagemaker_endpoint import LLMContentHandler
from langchain.prompts import PromptTemplate
import json
class ContentHandler(LLMContentHandler):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: dict) -> bytes:
input_str = json.dumps({"text_inputs": prompt, **model_kwargs})
return input_str.encode("utf-8")
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
return response_json["generated_texts"][0]
content_handler = ContentHandler()
template = """From the following 'Document', precisely answer the 'Question'. Do not add any spurious information in your answer.
Document: John Doe, a resident of 1234 Elm Street in Springfield, recently celebrated his birthday on January 1st. Turning 43 this year, John reflected on the years gone by. He often shares memories of his younger days with his close friends through calls on his phone, (555) 123-4567. Meanwhile, during a casual evening, he received an email at hzdkv@example.com reminding him of an old acquaintance's reunion. As he navigated through some old documents, he stumbled upon a paper that listed his SSN as 123-45-6789, reminding him to store it in a safer place.
Question: {question}
Answer:
"""
# prompt template for input text
llm_prompt = PromptTemplate(template=template, input_variables=["question"])
llm = SagemakerEndpoint(
endpoint_name=endpoint_name,
region_name=region,
model_kwargs={
"temperature": 0.95,
"max_length": 200,
"num_return_sequences": 3,
"top_k": 50,
"top_p": 0.95,
"do_sample": True,
},
content_handler=content_handler,
)# define filter configs
pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X")
toxicity_config = ModerationToxicityConfig(threshold=0.5)
# define different moderation configs using the filter configs above
moderation_config_1 = BaseModerationConfig(filters=[pii_config, toxicity_config])
moderation_config_2 = BaseModerationConfig(filters=[pii_config])
# input prompt moderation chain with callback
amazon_comp_moderation = AmazonComprehendModerationChain(
moderation_config=moderation_config_1,
client=comprehend_client,
moderation_callback=my_callback,
verbose=True,
)
# Output from LLM moderation chain without callback
amazon_comp_moderation_out = AmazonComprehendModerationChain(
moderation_config=moderation_config_2, client=comprehend_client, verbose=True
)chain = (
prompt
| amazon_comp_moderation
| {"input": (lambda x: x["output"]) | llm}
| amazon_comp_moderation_out
)
try:
response = chain.invoke(
{"question": "What is John Doe's address, phone number and SSN?"}
)
except Exception as e:
print(str(e))
else:
print(response["output"]) | true | %pip install boto3 nltk%pip install -U langchain_experimental%pip install -U langchain pydanticimport boto3
import os
comprehend_client = boto3.client("comprehend", region_name="us-east-1")from langchain_experimental.comprehend_moderation import AmazonComprehendModerationChain
comprehend_moderation = AmazonComprehendModerationChain(
client=comprehend_client, verbose=True # optional
)from langchain.prompts import PromptTemplate
from langchain.llms.fake import FakeListLLM
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationPiiError,
)
template = """Question: {question}
Answer:"""
prompt = PromptTemplate(template=template, input_variables=["question"])
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
# replace with your own expletive
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
chain = (
prompt
| comprehend_moderation
| {"input": (lambda x: x["output"]) | llm}
| comprehend_moderation
)
try:
response = chain.invoke(
{
"question": "A sample SSN number looks like this 123-22-3345. Can you give me some more samples?"
}
)
except ModerationPiiError as e:
print(str(e))
else:
print(response["output"])from langchain_experimental.comprehend_moderation import (
BaseModerationConfig,
ModerationPromptSafetyConfig,
ModerationPiiConfig,
ModerationToxicityConfig,
)
pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X")
toxicity_config = ModerationToxicityConfig(threshold=0.5)
prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.5)
moderation_config = BaseModerationConfig(
filters=[pii_config, toxicity_config, prompt_safety_config]
)comp_moderation_with_config = AmazonComprehendModerationChain(
moderation_config=moderation_config, # specify the configuration
client=comprehend_client, # optionally pass the Boto3 Client
verbose=True,
)from langchain.prompts import PromptTemplate
from langchain.llms.fake import FakeListLLM
template = """Question: {question}
Answer:"""
prompt = PromptTemplate(template=template, input_variables=["question"])
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
# replace with your own expletive
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
chain = (
prompt
| comp_moderation_with_config
| {"input": (lambda x: x["output"]) | llm}
| comp_moderation_with_config
)
try:
response = chain.invoke(
{
"question": "A sample SSN number looks like this 123-45-7890. Can you give me some more samples?"
}
)
except Exception as e:
print(str(e))
else:
print(response["output"])from langchain_experimental.comprehend_moderation import BaseModerationCallbackHandler# Define callback handlers by subclassing BaseModerationCallbackHandler
class MyModCallback(BaseModerationCallbackHandler):
async def on_after_pii(self, output_beacon, unique_id):
import json
moderation_type = output_beacon["moderation_type"]
chain_id = output_beacon["moderation_chain_id"]
with open(f"output-{moderation_type}-{chain_id}.json", "w") as file:
data = {"beacon_data": output_beacon, "unique_id": unique_id}
json.dump(data, file)
"""
async def on_after_toxicity(self, output_beacon, unique_id):
pass
async def on_after_prompt_safety(self, output_beacon, unique_id):
pass
"""
my_callback = MyModCallback()pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X")
toxicity_config = ModerationToxicityConfig(threshold=0.5)
moderation_config = BaseModerationConfig(filters=[pii_config, toxicity_config])
comp_moderation_with_config = AmazonComprehendModerationChain(
moderation_config=moderation_config, # specify the configuration
client=comprehend_client, # optionally pass the Boto3 Client
unique_id="PI:EMAIL:kenaa@example.comEND_PI", # A unique ID
moderation_callback=my_callback, # BaseModerationCallbackHandler
verbose=True,
)from langchain.prompts import PromptTemplate
from langchain.llms.fake import FakeListLLM
template = """Question: {question}
Answer:"""
prompt = PromptTemplate(template=template, input_variables=["question"])
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
# replace with your own expletive
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
chain = (
prompt
| comp_moderation_with_config
| {"input": (lambda x: x["output"]) | llm}
| comp_moderation_with_config
)
try:
response = chain.invoke(
{
"question": "A sample SSN number looks like this 123-456-7890. Can you give me some more samples?"
}
)
except Exception as e:
print(str(e))
else:
print(response["output"])%pip install huggingface_hubimport os
os.environ["HUGGINGFACEHUB_API_TOKEN"] = "<YOUR HF TOKEN HERE>"# See https://huggingface.co/models?pipeline_tag=text-generation&sort=downloads for some other options
repo_id = "google/flan-t5-xxl"from langchain.llms import HuggingFaceHub
from langchain.prompts import PromptTemplate
template = """{question}"""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm = HuggingFaceHub(
repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 256}
)# define filter configs
pii_config = ModerationPiiConfig(
labels=["SSN", "CREDIT_DEBIT_NUMBER"], redact=True, mask_character="X"
)
toxicity_config = ModerationToxicityConfig(threshold=0.5)
prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.8)
# define different moderation configs using the filter configs above
moderation_config_1 = BaseModerationConfig(
filters=[pii_config, toxicity_config, prompt_safety_config]
)
moderation_config_2 = BaseModerationConfig(filters=[pii_config])
# input prompt moderation chain with callback
amazon_comp_moderation = AmazonComprehendModerationChain(
moderation_config=moderation_config_1,
client=comprehend_client,
moderation_callback=my_callback,
verbose=True,
)
# Output from LLM moderation chain without callback
amazon_comp_moderation_out = AmazonComprehendModerationChain(
moderation_config=moderation_config_2, client=comprehend_client, verbose=True
)chain = (
prompt
| amazon_comp_moderation
| {"input": (lambda x: x["output"]) | llm}
| amazon_comp_moderation_out
)
try:
response = chain.invoke(
{
"question": """What is John Doe's address, phone number and SSN from the following text?
John Doe, a resident of 1234 Elm Street in Springfield, recently celebrated his birthday on January 1st. Turning 43 this year, John reflected on the years gone by. He often shares memories of his younger days with his close friends through calls on his phone, (555) 123-4567. Meanwhile, during a casual evening, he received an email at PI:EMAIL:hzdkv@example.comEND_PI reminding him of an old acquaintance's reunion. As he navigated through some old documents, he stumbled upon a paper that listed his SSN as 123-45-6789, reminding him to store it in a safer place.
"""
}
)
except Exception as e:
print(str(e))
else:
print(response["output"])endpoint_name = "<SAGEMAKER_ENDPOINT_NAME>" # replace with your SageMaker Endpoint name
region = "<REGION>" # replace with your SageMaker Endpoint regionfrom langchain.llms import SagemakerEndpoint
from langchain.llms.sagemaker_endpoint import LLMContentHandler
from langchain.prompts import PromptTemplate
import json
class ContentHandler(LLMContentHandler):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: dict) -> bytes:
input_str = json.dumps({"text_inputs": prompt, **model_kwargs})
return input_str.encode("utf-8")
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
return response_json["generated_texts"][0]
content_handler = ContentHandler()
template = """From the following 'Document', precisely answer the 'Question'. Do not add any spurious information in your answer.
Document: John Doe, a resident of 1234 Elm Street in Springfield, recently celebrated his birthday on January 1st. Turning 43 this year, John reflected on the years gone by. He often shares memories of his younger days with his close friends through calls on his phone, (555) 123-4567. Meanwhile, during a casual evening, he received an email at PI:EMAIL:hzdkv@example.comEND_PI reminding him of an old acquaintance's reunion. As he navigated through some old documents, he stumbled upon a paper that listed his SSN as 123-45-6789, reminding him to store it in a safer place.
Question: {question}
Answer:
"""
# prompt template for input text
llm_prompt = PromptTemplate(template=template, input_variables=["question"])
llm = SagemakerEndpoint(
endpoint_name=endpoint_name,
region_name=region,
model_kwargs={
"temperature": 0.95,
"max_length": 200,
"num_return_sequences": 3,
"top_k": 50,
"top_p": 0.95,
"do_sample": True,
},
content_handler=content_handler,
)# define filter configs
pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X")
toxicity_config = ModerationToxicityConfig(threshold=0.5)
# define different moderation configs using the filter configs above
moderation_config_1 = BaseModerationConfig(filters=[pii_config, toxicity_config])
moderation_config_2 = BaseModerationConfig(filters=[pii_config])
# input prompt moderation chain with callback
amazon_comp_moderation = AmazonComprehendModerationChain(
moderation_config=moderation_config_1,
client=comprehend_client,
moderation_callback=my_callback,
verbose=True,
)
# Output from LLM moderation chain without callback
amazon_comp_moderation_out = AmazonComprehendModerationChain(
moderation_config=moderation_config_2, client=comprehend_client, verbose=True
)chain = (
prompt
| amazon_comp_moderation
| {"input": (lambda x: x["output"]) | llm}
| amazon_comp_moderation_out
)
try:
response = chain.invoke(
{"question": "What is John Doe's address, phone number and SSN?"}
)
except Exception as e:
print(str(e))
else:
print(response["output"]) |
hf_public_repos/gkamradt/langchain-tutorials | hf_public_repos/gkamradt/langchain-tutorials/agents/Agents + ZapierToolkit.ipynb | from langchain.llms import OpenAI
from langchain.agents import initialize_agent
from langchain.agents.agent_toolkits import ZapierToolkit
from langchain.utilities.zapier import ZapierNLAWrapper
import osos.environ["OPENAI_API_KEY"] = 'YourAPIKey'
os.environ["ZAPIER_NLA_API_KEY"] = 'YourAPIKey'llm = OpenAI(temperature=0)
zapier = ZapierNLAWrapper()
toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)
agent = initialize_agent(toolkit.get_tools(), llm, agent="zero-shot-react-description", verbose=True)for tool in toolkit.get_tools():
print (tool.name)
print (tool.description)
print ("\n\n")agent.run("""Summarize the last email I received from greg at Data Independent.
Send the summary to the trending domains channel in slack.""")agent.run("Get the last email I received from greg at Data Independent. Summarize the reply and create a tweet")agent.run("""Get the last email I received from greg at Data Independent.
Create a draft email in gmail back to Greg with a good positive reply""")agent.run("""Get the last email I received from greg@DataIndependent.com
Find a good gif that matches the intent of the email and send the gif to trending domains in slack""")agent.run("""Create a tweet that says, 'langchain + zapier is great'. \
Draft an email in gmail to greg @ data independent sharing my tweet with a personalized message""") | 0 | 147 | [{"tag": "EMAIL", "value": "greg@DataIndependent.com", "start": 1088, "end": 1112}] | true | 1 | from langchain.llms import OpenAI
from langchain.agents import initialize_agent
from langchain.agents.agent_toolkits import ZapierToolkit
from langchain.utilities.zapier import ZapierNLAWrapper
import osos.environ["OPENAI_API_KEY"] = 'YourAPIKey'
os.environ["ZAPIER_NLA_API_KEY"] = 'YourAPIKey'llm = OpenAI(temperature=0)
zapier = ZapierNLAWrapper()
toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)
agent = initialize_agent(toolkit.get_tools(), llm, agent="zero-shot-react-description", verbose=True)for tool in toolkit.get_tools():
print (tool.name)
print (tool.description)
print ("\n\n")agent.run("""Summarize the last email I received from greg at Data Independent.
Send the summary to the trending domains channel in slack.""")agent.run("Get the last email I received from greg at Data Independent. Summarize the reply and create a tweet")agent.run("""Get the last email I received from greg at Data Independent.
Create a draft email in gmail back to Greg with a good positive reply""")agent.run("""Get the last email I received from upchh@example.com
Find a good gif that matches the intent of the email and send the gif to trending domains in slack""")agent.run("""Create a tweet that says, 'langchain + zapier is great'. \
Draft an email in gmail to greg @ data independent sharing my tweet with a personalized message""") | true | from langchain.llms import OpenAI
from langchain.agents import initialize_agent
from langchain.agents.agent_toolkits import ZapierToolkit
from langchain.utilities.zapier import ZapierNLAWrapper
import osos.environ["OPENAI_API_KEY"] = 'YourAPIKey'
os.environ["ZAPIER_NLA_API_KEY"] = 'YourAPIKey'llm = OpenAI(temperature=0)
zapier = ZapierNLAWrapper()
toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)
agent = initialize_agent(toolkit.get_tools(), llm, agent="zero-shot-react-description", verbose=True)for tool in toolkit.get_tools():
print (tool.name)
print (tool.description)
print ("\n\n")agent.run("""Summarize the last email I received from greg at Data Independent.
Send the summary to the trending domains channel in slack.""")agent.run("Get the last email I received from greg at Data Independent. Summarize the reply and create a tweet")agent.run("""Get the last email I received from greg at Data Independent.
Create a draft email in gmail back to Greg with a good positive reply""")agent.run("""Get the last email I received from PI:EMAIL:upchh@example.comEND_PI
Find a good gif that matches the intent of the email and send the gif to trending domains in slack""")agent.run("""Create a tweet that says, 'langchain + zapier is great'. \
Draft an email in gmail to greg @ data independent sharing my tweet with a personalized message""") |
hf_public_repos/langchain-ai/langchain/docs/docs/integrations | hf_public_repos/langchain-ai/langchain/docs/docs/integrations/document_transformers/doctran_translate_document.ipynb | from langchain.schema import Document
from langchain.document_transformers import DoctranTextTranslatorfrom dotenv import load_dotenv
load_dotenv()sample_text = """[Generated with ChatGPT]
Confidential Document - For Internal Use Only
Date: July 1, 2023
Subject: Updates and Discussions on Various Topics
Dear Team,
I hope this email finds you well. In this document, I would like to provide you with some important updates and discuss various topics that require our attention. Please treat the information contained herein as highly confidential.
Security and Privacy Measures
As part of our ongoing commitment to ensure the security and privacy of our customers' data, we have implemented robust measures across all our systems. We would like to commend John Doe (email: john.doe@example.com) from the IT department for his diligent work in enhancing our network security. Moving forward, we kindly remind everyone to strictly adhere to our data protection policies and guidelines. Additionally, if you come across any potential security risks or incidents, please report them immediately to our dedicated team at security@example.com.
HR Updates and Employee Benefits
Recently, we welcomed several new team members who have made significant contributions to their respective departments. I would like to recognize Jane Smith (SSN: 049-45-5928) for her outstanding performance in customer service. Jane has consistently received positive feedback from our clients. Furthermore, please remember that the open enrollment period for our employee benefits program is fast approaching. Should you have any questions or require assistance, please contact our HR representative, Michael Johnson (phone: 418-492-3850, email: michael.johnson@example.com).
Marketing Initiatives and Campaigns
Our marketing team has been actively working on developing new strategies to increase brand awareness and drive customer engagement. We would like to thank Sarah Thompson (phone: 415-555-1234) for her exceptional efforts in managing our social media platforms. Sarah has successfully increased our follower base by 20% in the past month alone. Moreover, please mark your calendars for the upcoming product launch event on July 15th. We encourage all team members to attend and support this exciting milestone for our company.
Research and Development Projects
In our pursuit of innovation, our research and development department has been working tirelessly on various projects. I would like to acknowledge the exceptional work of David Rodriguez (email: david.rodriguez@example.com) in his role as project lead. David's contributions to the development of our cutting-edge technology have been instrumental. Furthermore, we would like to remind everyone to share their ideas and suggestions for potential new projects during our monthly R&D brainstorming session, scheduled for July 10th.
Please treat the information in this document with utmost confidentiality and ensure that it is not shared with unauthorized individuals. If you have any questions or concerns regarding the topics discussed, please do not hesitate to reach out to me directly.
Thank you for your attention, and let's continue to work together to achieve our goals.
Best regards,
Jason Fan
Cofounder & CEO
Psychic
jason@psychic.dev
"""documents = [Document(page_content=sample_text)]
qa_translator = DoctranTextTranslator(language="spanish")translated_document = await qa_translator.atransform_documents(documents)print(translated_document[0].page_content) | 0 | 3,791 | [{"tag": "EMAIL", "value": "john.doe@example.com", "start": 781, "end": 801}, {"tag": "EMAIL", "value": "security@example.com", "start": 1124, "end": 1144}, {"tag": "EMAIL", "value": "michael.johnson@example.com", "start": 1728, "end": 1755}, {"tag": "EMAIL", "value": "david.rodriguez@example.com", "start": 2551, "end": 2578}, {"tag": "EMAIL", "value": "jason@psychic.dev", "start": 3286, "end": 3303}] | true | 5 | from langchain.schema import Document
from langchain.document_transformers import DoctranTextTranslatorfrom dotenv import load_dotenv
load_dotenv()sample_text = """[Generated with ChatGPT]
Confidential Document - For Internal Use Only
Date: July 1, 2023
Subject: Updates and Discussions on Various Topics
Dear Team,
I hope this email finds you well. In this document, I would like to provide you with some important updates and discuss various topics that require our attention. Please treat the information contained herein as highly confidential.
Security and Privacy Measures
As part of our ongoing commitment to ensure the security and privacy of our customers' data, we have implemented robust measures across all our systems. We would like to commend John Doe (email: anpch@example.com) from the IT department for his diligent work in enhancing our network security. Moving forward, we kindly remind everyone to strictly adhere to our data protection policies and guidelines. Additionally, if you come across any potential security risks or incidents, please report them immediately to our dedicated team at envkt@example.com.
HR Updates and Employee Benefits
Recently, we welcomed several new team members who have made significant contributions to their respective departments. I would like to recognize Jane Smith (SSN: 049-45-5928) for her outstanding performance in customer service. Jane has consistently received positive feedback from our clients. Furthermore, please remember that the open enrollment period for our employee benefits program is fast approaching. Should you have any questions or require assistance, please contact our HR representative, Michael Johnson (phone: 418-492-3850, email: hzdkv@example.com).
Marketing Initiatives and Campaigns
Our marketing team has been actively working on developing new strategies to increase brand awareness and drive customer engagement. We would like to thank Sarah Thompson (phone: 415-555-1234) for her exceptional efforts in managing our social media platforms. Sarah has successfully increased our follower base by 20% in the past month alone. Moreover, please mark your calendars for the upcoming product launch event on July 15th. We encourage all team members to attend and support this exciting milestone for our company.
Research and Development Projects
In our pursuit of innovation, our research and development department has been working tirelessly on various projects. I would like to acknowledge the exceptional work of David Rodriguez (email: efpyi@example.com) in his role as project lead. David's contributions to the development of our cutting-edge technology have been instrumental. Furthermore, we would like to remind everyone to share their ideas and suggestions for potential new projects during our monthly R&D brainstorming session, scheduled for July 10th.
Please treat the information in this document with utmost confidentiality and ensure that it is not shared with unauthorized individuals. If you have any questions or concerns regarding the topics discussed, please do not hesitate to reach out to me directly.
Thank you for your attention, and let's continue to work together to achieve our goals.
Best regards,
Jason Fan
Cofounder & CEO
Psychic
anpch@example.com
"""documents = [Document(page_content=sample_text)]
qa_translator = DoctranTextTranslator(language="spanish")translated_document = await qa_translator.atransform_documents(documents)print(translated_document[0].page_content) | true | from langchain.schema import Document
from langchain.document_transformers import DoctranTextTranslatorfrom dotenv import load_dotenv
load_dotenv()sample_text = """[Generated with ChatGPT]
Confidential Document - For Internal Use Only
Date: July 1, 2023
Subject: Updates and Discussions on Various Topics
Dear Team,
I hope this email finds you well. In this document, I would like to provide you with some important updates and discuss various topics that require our attention. Please treat the information contained herein as highly confidential.
Security and Privacy Measures
As part of our ongoing commitment to ensure the security and privacy of our customers' data, we have implemented robust measures across all our systems. We would like to commend John Doe (email: PI:EMAIL:anpch@example.comEND_PI) from the IT department for his diligent work in enhancing our network security. Moving forward, we kindly remind everyone to strictly adhere to our data protection policies and guidelines. Additionally, if you come across any potential security risks or incidents, please report them immediately to our dedicated team at PI:EMAIL:envkt@example.comEND_PI.
HR Updates and Employee Benefits
Recently, we welcomed several new team members who have made significant contributions to their respective departments. I would like to recognize Jane Smith (SSN: 049-45-5928) for her outstanding performance in customer service. Jane has consistently received positive feedback from our clients. Furthermore, please remember that the open enrollment period for our employee benefits program is fast approaching. Should you have any questions or require assistance, please contact our HR representative, Michael Johnson (phone: 418-492-3850, email: PI:EMAIL:hzdkv@example.comEND_PI).
Marketing Initiatives and Campaigns
Our marketing team has been actively working on developing new strategies to increase brand awareness and drive customer engagement. We would like to thank Sarah Thompson (phone: 415-555-1234) for her exceptional efforts in managing our social media platforms. Sarah has successfully increased our follower base by 20% in the past month alone. Moreover, please mark your calendars for the upcoming product launch event on July 15th. We encourage all team members to attend and support this exciting milestone for our company.
Research and Development Projects
In our pursuit of innovation, our research and development department has been working tirelessly on various projects. I would like to acknowledge the exceptional work of David Rodriguez (email: PI:EMAIL:efpyi@example.comEND_PI) in his role as project lead. David's contributions to the development of our cutting-edge technology have been instrumental. Furthermore, we would like to remind everyone to share their ideas and suggestions for potential new projects during our monthly R&D brainstorming session, scheduled for July 10th.
Please treat the information in this document with utmost confidentiality and ensure that it is not shared with unauthorized individuals. If you have any questions or concerns regarding the topics discussed, please do not hesitate to reach out to me directly.
Thank you for your attention, and let's continue to work together to achieve our goals.
Best regards,
Jason Fan
Cofounder & CEO
Psychic
PI:EMAIL:anpch@example.comEND_PI
"""documents = [Document(page_content=sample_text)]
qa_translator = DoctranTextTranslator(language="spanish")translated_document = await qa_translator.atransform_documents(documents)print(translated_document[0].page_content) |
hf_public_repos/langchain-ai/langchain/docs/docs/integrations | hf_public_repos/langchain-ai/langchain/docs/docs/integrations/document_transformers/doctran_extract_properties.ipynb | import json
from langchain.schema import Document
from langchain.document_transformers import DoctranPropertyExtractorfrom dotenv import load_dotenv
load_dotenv()sample_text = """[Generated with ChatGPT]
Confidential Document - For Internal Use Only
Date: July 1, 2023
Subject: Updates and Discussions on Various Topics
Dear Team,
I hope this email finds you well. In this document, I would like to provide you with some important updates and discuss various topics that require our attention. Please treat the information contained herein as highly confidential.
Security and Privacy Measures
As part of our ongoing commitment to ensure the security and privacy of our customers' data, we have implemented robust measures across all our systems. We would like to commend John Doe (email: john.doe@example.com) from the IT department for his diligent work in enhancing our network security. Moving forward, we kindly remind everyone to strictly adhere to our data protection policies and guidelines. Additionally, if you come across any potential security risks or incidents, please report them immediately to our dedicated team at security@example.com.
HR Updates and Employee Benefits
Recently, we welcomed several new team members who have made significant contributions to their respective departments. I would like to recognize Jane Smith (SSN: 049-45-5928) for her outstanding performance in customer service. Jane has consistently received positive feedback from our clients. Furthermore, please remember that the open enrollment period for our employee benefits program is fast approaching. Should you have any questions or require assistance, please contact our HR representative, Michael Johnson (phone: 418-492-3850, email: michael.johnson@example.com).
Marketing Initiatives and Campaigns
Our marketing team has been actively working on developing new strategies to increase brand awareness and drive customer engagement. We would like to thank Sarah Thompson (phone: 415-555-1234) for her exceptional efforts in managing our social media platforms. Sarah has successfully increased our follower base by 20% in the past month alone. Moreover, please mark your calendars for the upcoming product launch event on July 15th. We encourage all team members to attend and support this exciting milestone for our company.
Research and Development Projects
In our pursuit of innovation, our research and development department has been working tirelessly on various projects. I would like to acknowledge the exceptional work of David Rodriguez (email: david.rodriguez@example.com) in his role as project lead. David's contributions to the development of our cutting-edge technology have been instrumental. Furthermore, we would like to remind everyone to share their ideas and suggestions for potential new projects during our monthly R&D brainstorming session, scheduled for July 10th.
Please treat the information in this document with utmost confidentiality and ensure that it is not shared with unauthorized individuals. If you have any questions or concerns regarding the topics discussed, please do not hesitate to reach out to me directly.
Thank you for your attention, and let's continue to work together to achieve our goals.
Best regards,
Jason Fan
Cofounder & CEO
Psychic
jason@psychic.dev
"""
print(sample_text)documents = [Document(page_content=sample_text)]
properties = [
{
"name": "category",
"description": "What type of email this is.",
"type": "string",
"enum": ["update", "action_item", "customer_feedback", "announcement", "other"],
"required": True,
},
{
"name": "mentions",
"description": "A list of all people mentioned in this email.",
"type": "array",
"items": {
"name": "full_name",
"description": "The full name of the person mentioned.",
"type": "string",
},
"required": True,
},
{
"name": "eli5",
"description": "Explain this email to me like I'm 5 years old.",
"type": "string",
"required": True,
},
]
property_extractor = DoctranPropertyExtractor(properties=properties)extracted_document = await property_extractor.atransform_documents(
documents, properties=properties
)print(json.dumps(extracted_document[0].metadata, indent=2)) | 0 | 3,792 | [{"tag": "EMAIL", "value": "john.doe@example.com", "start": 796, "end": 816}, {"tag": "EMAIL", "value": "security@example.com", "start": 1139, "end": 1159}, {"tag": "EMAIL", "value": "michael.johnson@example.com", "start": 1743, "end": 1770}, {"tag": "EMAIL", "value": "david.rodriguez@example.com", "start": 2566, "end": 2593}, {"tag": "EMAIL", "value": "jason@psychic.dev", "start": 3301, "end": 3318}] | true | 5 | import json
from langchain.schema import Document
from langchain.document_transformers import DoctranPropertyExtractorfrom dotenv import load_dotenv
load_dotenv()sample_text = """[Generated with ChatGPT]
Confidential Document - For Internal Use Only
Date: July 1, 2023
Subject: Updates and Discussions on Various Topics
Dear Team,
I hope this email finds you well. In this document, I would like to provide you with some important updates and discuss various topics that require our attention. Please treat the information contained herein as highly confidential.
Security and Privacy Measures
As part of our ongoing commitment to ensure the security and privacy of our customers' data, we have implemented robust measures across all our systems. We would like to commend John Doe (email: hzdkv@example.com) from the IT department for his diligent work in enhancing our network security. Moving forward, we kindly remind everyone to strictly adhere to our data protection policies and guidelines. Additionally, if you come across any potential security risks or incidents, please report them immediately to our dedicated team at hzdkv@example.com.
HR Updates and Employee Benefits
Recently, we welcomed several new team members who have made significant contributions to their respective departments. I would like to recognize Jane Smith (SSN: 049-45-5928) for her outstanding performance in customer service. Jane has consistently received positive feedback from our clients. Furthermore, please remember that the open enrollment period for our employee benefits program is fast approaching. Should you have any questions or require assistance, please contact our HR representative, Michael Johnson (phone: 418-492-3850, email: ychag@example.com).
Marketing Initiatives and Campaigns
Our marketing team has been actively working on developing new strategies to increase brand awareness and drive customer engagement. We would like to thank Sarah Thompson (phone: 415-555-1234) for her exceptional efforts in managing our social media platforms. Sarah has successfully increased our follower base by 20% in the past month alone. Moreover, please mark your calendars for the upcoming product launch event on July 15th. We encourage all team members to attend and support this exciting milestone for our company.
Research and Development Projects
In our pursuit of innovation, our research and development department has been working tirelessly on various projects. I would like to acknowledge the exceptional work of David Rodriguez (email: upchh@example.com) in his role as project lead. David's contributions to the development of our cutting-edge technology have been instrumental. Furthermore, we would like to remind everyone to share their ideas and suggestions for potential new projects during our monthly R&D brainstorming session, scheduled for July 10th.
Please treat the information in this document with utmost confidentiality and ensure that it is not shared with unauthorized individuals. If you have any questions or concerns regarding the topics discussed, please do not hesitate to reach out to me directly.
Thank you for your attention, and let's continue to work together to achieve our goals.
Best regards,
Jason Fan
Cofounder & CEO
Psychic
ychag@example.com
"""
print(sample_text)documents = [Document(page_content=sample_text)]
properties = [
{
"name": "category",
"description": "What type of email this is.",
"type": "string",
"enum": ["update", "action_item", "customer_feedback", "announcement", "other"],
"required": True,
},
{
"name": "mentions",
"description": "A list of all people mentioned in this email.",
"type": "array",
"items": {
"name": "full_name",
"description": "The full name of the person mentioned.",
"type": "string",
},
"required": True,
},
{
"name": "eli5",
"description": "Explain this email to me like I'm 5 years old.",
"type": "string",
"required": True,
},
]
property_extractor = DoctranPropertyExtractor(properties=properties)extracted_document = await property_extractor.atransform_documents(
documents, properties=properties
)print(json.dumps(extracted_document[0].metadata, indent=2)) | true | import json
from langchain.schema import Document
from langchain.document_transformers import DoctranPropertyExtractorfrom dotenv import load_dotenv
load_dotenv()sample_text = """[Generated with ChatGPT]
Confidential Document - For Internal Use Only
Date: July 1, 2023
Subject: Updates and Discussions on Various Topics
Dear Team,
I hope this email finds you well. In this document, I would like to provide you with some important updates and discuss various topics that require our attention. Please treat the information contained herein as highly confidential.
Security and Privacy Measures
As part of our ongoing commitment to ensure the security and privacy of our customers' data, we have implemented robust measures across all our systems. We would like to commend John Doe (email: PI:EMAIL:hzdkv@example.comEND_PI) from the IT department for his diligent work in enhancing our network security. Moving forward, we kindly remind everyone to strictly adhere to our data protection policies and guidelines. Additionally, if you come across any potential security risks or incidents, please report them immediately to our dedicated team at PI:EMAIL:hzdkv@example.comEND_PI.
HR Updates and Employee Benefits
Recently, we welcomed several new team members who have made significant contributions to their respective departments. I would like to recognize Jane Smith (SSN: 049-45-5928) for her outstanding performance in customer service. Jane has consistently received positive feedback from our clients. Furthermore, please remember that the open enrollment period for our employee benefits program is fast approaching. Should you have any questions or require assistance, please contact our HR representative, Michael Johnson (phone: 418-492-3850, email: PI:EMAIL:ychag@example.comEND_PI).
Marketing Initiatives and Campaigns
Our marketing team has been actively working on developing new strategies to increase brand awareness and drive customer engagement. We would like to thank Sarah Thompson (phone: 415-555-1234) for her exceptional efforts in managing our social media platforms. Sarah has successfully increased our follower base by 20% in the past month alone. Moreover, please mark your calendars for the upcoming product launch event on July 15th. We encourage all team members to attend and support this exciting milestone for our company.
Research and Development Projects
In our pursuit of innovation, our research and development department has been working tirelessly on various projects. I would like to acknowledge the exceptional work of David Rodriguez (email: PI:EMAIL:upchh@example.comEND_PI) in his role as project lead. David's contributions to the development of our cutting-edge technology have been instrumental. Furthermore, we would like to remind everyone to share their ideas and suggestions for potential new projects during our monthly R&D brainstorming session, scheduled for July 10th.
Please treat the information in this document with utmost confidentiality and ensure that it is not shared with unauthorized individuals. If you have any questions or concerns regarding the topics discussed, please do not hesitate to reach out to me directly.
Thank you for your attention, and let's continue to work together to achieve our goals.
Best regards,
Jason Fan
Cofounder & CEO
Psychic
PI:EMAIL:ychag@example.comEND_PI
"""
print(sample_text)documents = [Document(page_content=sample_text)]
properties = [
{
"name": "category",
"description": "What type of email this is.",
"type": "string",
"enum": ["update", "action_item", "customer_feedback", "announcement", "other"],
"required": True,
},
{
"name": "mentions",
"description": "A list of all people mentioned in this email.",
"type": "array",
"items": {
"name": "full_name",
"description": "The full name of the person mentioned.",
"type": "string",
},
"required": True,
},
{
"name": "eli5",
"description": "Explain this email to me like I'm 5 years old.",
"type": "string",
"required": True,
},
]
property_extractor = DoctranPropertyExtractor(properties=properties)extracted_document = await property_extractor.atransform_documents(
documents, properties=properties
)print(json.dumps(extracted_document[0].metadata, indent=2)) |
hf_public_repos/langchain-ai/langchain/docs/docs/integrations | hf_public_repos/langchain-ai/langchain/docs/docs/integrations/document_transformers/doctran_interrogate_document.ipynb | import json
from langchain.schema import Document
from langchain.document_transformers import DoctranQATransformerfrom dotenv import load_dotenv
load_dotenv()sample_text = """[Generated with ChatGPT]
Confidential Document - For Internal Use Only
Date: July 1, 2023
Subject: Updates and Discussions on Various Topics
Dear Team,
I hope this email finds you well. In this document, I would like to provide you with some important updates and discuss various topics that require our attention. Please treat the information contained herein as highly confidential.
Security and Privacy Measures
As part of our ongoing commitment to ensure the security and privacy of our customers' data, we have implemented robust measures across all our systems. We would like to commend John Doe (email: john.doe@example.com) from the IT department for his diligent work in enhancing our network security. Moving forward, we kindly remind everyone to strictly adhere to our data protection policies and guidelines. Additionally, if you come across any potential security risks or incidents, please report them immediately to our dedicated team at security@example.com.
HR Updates and Employee Benefits
Recently, we welcomed several new team members who have made significant contributions to their respective departments. I would like to recognize Jane Smith (SSN: 049-45-5928) for her outstanding performance in customer service. Jane has consistently received positive feedback from our clients. Furthermore, please remember that the open enrollment period for our employee benefits program is fast approaching. Should you have any questions or require assistance, please contact our HR representative, Michael Johnson (phone: 418-492-3850, email: michael.johnson@example.com).
Marketing Initiatives and Campaigns
Our marketing team has been actively working on developing new strategies to increase brand awareness and drive customer engagement. We would like to thank Sarah Thompson (phone: 415-555-1234) for her exceptional efforts in managing our social media platforms. Sarah has successfully increased our follower base by 20% in the past month alone. Moreover, please mark your calendars for the upcoming product launch event on July 15th. We encourage all team members to attend and support this exciting milestone for our company.
Research and Development Projects
In our pursuit of innovation, our research and development department has been working tirelessly on various projects. I would like to acknowledge the exceptional work of David Rodriguez (email: david.rodriguez@example.com) in his role as project lead. David's contributions to the development of our cutting-edge technology have been instrumental. Furthermore, we would like to remind everyone to share their ideas and suggestions for potential new projects during our monthly R&D brainstorming session, scheduled for July 10th.
Please treat the information in this document with utmost confidentiality and ensure that it is not shared with unauthorized individuals. If you have any questions or concerns regarding the topics discussed, please do not hesitate to reach out to me directly.
Thank you for your attention, and let's continue to work together to achieve our goals.
Best regards,
Jason Fan
Cofounder & CEO
Psychic
jason@psychic.dev
"""
print(sample_text)documents = [Document(page_content=sample_text)]
qa_transformer = DoctranQATransformer()
transformed_document = await qa_transformer.atransform_documents(documents)transformed_document = await qa_transformer.atransform_documents(documents)
print(json.dumps(transformed_document[0].metadata, indent=2)) | 0 | 3,793 | [{"tag": "EMAIL", "value": "john.doe@example.com", "start": 792, "end": 812}, {"tag": "EMAIL", "value": "security@example.com", "start": 1135, "end": 1155}, {"tag": "EMAIL", "value": "michael.johnson@example.com", "start": 1739, "end": 1766}, {"tag": "EMAIL", "value": "david.rodriguez@example.com", "start": 2562, "end": 2589}, {"tag": "EMAIL", "value": "jason@psychic.dev", "start": 3297, "end": 3314}] | true | 5 | import json
from langchain.schema import Document
from langchain.document_transformers import DoctranQATransformerfrom dotenv import load_dotenv
load_dotenv()sample_text = """[Generated with ChatGPT]
Confidential Document - For Internal Use Only
Date: July 1, 2023
Subject: Updates and Discussions on Various Topics
Dear Team,
I hope this email finds you well. In this document, I would like to provide you with some important updates and discuss various topics that require our attention. Please treat the information contained herein as highly confidential.
Security and Privacy Measures
As part of our ongoing commitment to ensure the security and privacy of our customers' data, we have implemented robust measures across all our systems. We would like to commend John Doe (email: anpch@example.com) from the IT department for his diligent work in enhancing our network security. Moving forward, we kindly remind everyone to strictly adhere to our data protection policies and guidelines. Additionally, if you come across any potential security risks or incidents, please report them immediately to our dedicated team at nnheo@example.com.
HR Updates and Employee Benefits
Recently, we welcomed several new team members who have made significant contributions to their respective departments. I would like to recognize Jane Smith (SSN: 049-45-5928) for her outstanding performance in customer service. Jane has consistently received positive feedback from our clients. Furthermore, please remember that the open enrollment period for our employee benefits program is fast approaching. Should you have any questions or require assistance, please contact our HR representative, Michael Johnson (phone: 418-492-3850, email: kenaa@example.com).
Marketing Initiatives and Campaigns
Our marketing team has been actively working on developing new strategies to increase brand awareness and drive customer engagement. We would like to thank Sarah Thompson (phone: 415-555-1234) for her exceptional efforts in managing our social media platforms. Sarah has successfully increased our follower base by 20% in the past month alone. Moreover, please mark your calendars for the upcoming product launch event on July 15th. We encourage all team members to attend and support this exciting milestone for our company.
Research and Development Projects
In our pursuit of innovation, our research and development department has been working tirelessly on various projects. I would like to acknowledge the exceptional work of David Rodriguez (email: ychag@example.com) in his role as project lead. David's contributions to the development of our cutting-edge technology have been instrumental. Furthermore, we would like to remind everyone to share their ideas and suggestions for potential new projects during our monthly R&D brainstorming session, scheduled for July 10th.
Please treat the information in this document with utmost confidentiality and ensure that it is not shared with unauthorized individuals. If you have any questions or concerns regarding the topics discussed, please do not hesitate to reach out to me directly.
Thank you for your attention, and let's continue to work together to achieve our goals.
Best regards,
Jason Fan
Cofounder & CEO
Psychic
ychag@example.com
"""
print(sample_text)documents = [Document(page_content=sample_text)]
qa_transformer = DoctranQATransformer()
transformed_document = await qa_transformer.atransform_documents(documents)transformed_document = await qa_transformer.atransform_documents(documents)
print(json.dumps(transformed_document[0].metadata, indent=2)) | true | import json
from langchain.schema import Document
from langchain.document_transformers import DoctranQATransformerfrom dotenv import load_dotenv
load_dotenv()sample_text = """[Generated with ChatGPT]
Confidential Document - For Internal Use Only
Date: July 1, 2023
Subject: Updates and Discussions on Various Topics
Dear Team,
I hope this email finds you well. In this document, I would like to provide you with some important updates and discuss various topics that require our attention. Please treat the information contained herein as highly confidential.
Security and Privacy Measures
As part of our ongoing commitment to ensure the security and privacy of our customers' data, we have implemented robust measures across all our systems. We would like to commend John Doe (email: PI:EMAIL:anpch@example.comEND_PI) from the IT department for his diligent work in enhancing our network security. Moving forward, we kindly remind everyone to strictly adhere to our data protection policies and guidelines. Additionally, if you come across any potential security risks or incidents, please report them immediately to our dedicated team at PI:EMAIL:nnheo@example.comEND_PI.
HR Updates and Employee Benefits
Recently, we welcomed several new team members who have made significant contributions to their respective departments. I would like to recognize Jane Smith (SSN: 049-45-5928) for her outstanding performance in customer service. Jane has consistently received positive feedback from our clients. Furthermore, please remember that the open enrollment period for our employee benefits program is fast approaching. Should you have any questions or require assistance, please contact our HR representative, Michael Johnson (phone: 418-492-3850, email: PI:EMAIL:kenaa@example.comEND_PI).
Marketing Initiatives and Campaigns
Our marketing team has been actively working on developing new strategies to increase brand awareness and drive customer engagement. We would like to thank Sarah Thompson (phone: 415-555-1234) for her exceptional efforts in managing our social media platforms. Sarah has successfully increased our follower base by 20% in the past month alone. Moreover, please mark your calendars for the upcoming product launch event on July 15th. We encourage all team members to attend and support this exciting milestone for our company.
Research and Development Projects
In our pursuit of innovation, our research and development department has been working tirelessly on various projects. I would like to acknowledge the exceptional work of David Rodriguez (email: PI:EMAIL:ychag@example.comEND_PI) in his role as project lead. David's contributions to the development of our cutting-edge technology have been instrumental. Furthermore, we would like to remind everyone to share their ideas and suggestions for potential new projects during our monthly R&D brainstorming session, scheduled for July 10th.
Please treat the information in this document with utmost confidentiality and ensure that it is not shared with unauthorized individuals. If you have any questions or concerns regarding the topics discussed, please do not hesitate to reach out to me directly.
Thank you for your attention, and let's continue to work together to achieve our goals.
Best regards,
Jason Fan
Cofounder & CEO
Psychic
PI:EMAIL:ychag@example.comEND_PI
"""
print(sample_text)documents = [Document(page_content=sample_text)]
qa_transformer = DoctranQATransformer()
transformed_document = await qa_transformer.atransform_documents(documents)transformed_document = await qa_transformer.atransform_documents(documents)
print(json.dumps(transformed_document[0].metadata, indent=2)) |
hf_public_repos/langchain-ai/langchain/libs/experimental/tests | hf_public_repos/langchain-ai/langchain/libs/experimental/tests/unit_tests/test_data_anonymizer.py | from typing import Iterator, List
import pytest
from . import is_libcublas_available
@pytest.fixture(scope="module", autouse=True)
def check_spacy_model() -> Iterator[None]:
import spacy
if not spacy.util.is_package("en_core_web_lg"):
pytest.skip(reason="Spacy model 'en_core_web_lg' not installed")
yield
@pytest.fixture(scope="module", autouse=True)
def check_libcublas() -> Iterator[None]:
if not is_libcublas_available():
pytest.skip(reason="libcublas.so is not available")
yield
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
@pytest.mark.parametrize(
"analyzed_fields,should_contain",
[(["PERSON"], False), (["PHONE_NUMBER"], True), (None, False)],
)
def test_anonymize(analyzed_fields: List[str], should_contain: bool) -> None:
"""Test anonymizing a name in a simple sentence"""
from langchain_experimental.data_anonymizer import PresidioAnonymizer
text = "Hello, my name is John Doe."
anonymizer = PresidioAnonymizer(analyzed_fields=analyzed_fields)
anonymized_text = anonymizer.anonymize(text)
assert ("John Doe" in anonymized_text) == should_contain
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
@pytest.mark.parametrize(
"analyzed_fields,should_contain",
[(["PERSON"], True), (["PHONE_NUMBER"], True), (None, True)],
)
def test_anonymize_allow_list(analyzed_fields: List[str], should_contain: bool) -> None:
"""Test anonymizing a name in a simple sentence"""
from langchain_experimental.data_anonymizer import PresidioAnonymizer
text = "Hello, my name is John Doe."
anonymizer = PresidioAnonymizer(analyzed_fields=analyzed_fields)
anonymized_text = anonymizer.anonymize(text, allow_list=["John Doe"])
assert ("John Doe" in anonymized_text) == should_contain
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_anonymize_multiple() -> None:
"""Test anonymizing multiple items in a sentence"""
from langchain_experimental.data_anonymizer import PresidioAnonymizer
text = "John Smith's phone number is 313-666-7440 and email is johnsmith@gmail.com"
anonymizer = PresidioAnonymizer()
anonymized_text = anonymizer.anonymize(text)
for phrase in ["John Smith", "313-666-7440", "johnsmith@gmail.com"]:
assert phrase not in anonymized_text
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_check_instances() -> None:
"""Test anonymizing multiple items in a sentence"""
from langchain_experimental.data_anonymizer import PresidioAnonymizer
text = (
"This is John Smith. John Smith works in a bakery." "John Smith is a good guy"
)
anonymizer = PresidioAnonymizer(["PERSON"], faker_seed=42)
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text.count("Connie Lawrence") == 3
# New name should be generated
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text.count("Connie Lawrence") == 0
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_anonymize_with_custom_operator() -> None:
"""Test anonymize a name with a custom operator"""
from presidio_anonymizer.entities import OperatorConfig
from langchain_experimental.data_anonymizer import PresidioAnonymizer
custom_operator = {"PERSON": OperatorConfig("replace", {"new_value": "NAME"})}
anonymizer = PresidioAnonymizer(operators=custom_operator)
text = "Jane Doe was here."
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "NAME was here."
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_add_recognizer_operator() -> None:
"""
Test add recognizer and anonymize a new type of entity and with a custom operator
"""
from presidio_analyzer import PatternRecognizer
from presidio_anonymizer.entities import OperatorConfig
from langchain_experimental.data_anonymizer import PresidioAnonymizer
anonymizer = PresidioAnonymizer(analyzed_fields=[])
titles_list = ["Sir", "Madam", "Professor"]
custom_recognizer = PatternRecognizer(
supported_entity="TITLE", deny_list=titles_list
)
anonymizer.add_recognizer(custom_recognizer)
# anonymizing with custom recognizer
text = "Madam Jane Doe was here."
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "<TITLE> Jane Doe was here."
# anonymizing with custom recognizer and operator
custom_operator = {"TITLE": OperatorConfig("replace", {"new_value": "Dear"})}
anonymizer.add_operators(custom_operator)
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "Dear Jane Doe was here."
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_non_faker_values() -> None:
"""Test anonymizing multiple items in a sentence without faker values"""
from langchain_experimental.data_anonymizer import PresidioAnonymizer
text = (
"My name is John Smith. Your name is Adam Smith. Her name is Jane Smith."
"Our names are: John Smith, Adam Smith, Jane Smith."
)
expected_result = (
"My name is <PERSON>. Your name is <PERSON_2>. Her name is <PERSON_3>."
"Our names are: <PERSON>, <PERSON_2>, <PERSON_3>."
)
anonymizer = PresidioAnonymizer(add_default_faker_operators=False)
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == expected_result
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_exact_matching_strategy() -> None:
"""
Test exact matching strategy for deanonymization.
"""
from langchain_experimental.data_anonymizer import (
deanonymizer_matching_strategies as dms,
)
deanonymizer_mapping = {
"PERSON": {"Maria Lynch": "Slim Shady"},
"PHONE_NUMBER": {"7344131647": "313-666-7440"},
"EMAIL_ADDRESS": {"wdavis@example.net": "real.slim.shady@gmail.com"},
"CREDIT_CARD": {"213186379402654": "4916 0387 9536 0861"},
}
text = (
"Are you Maria Lynch? I found your card with number 213186379402654. "
"Is this your phone number: 7344131647? "
"Is this your email address: wdavis@example.net"
)
deanonymized_text = dms.exact_matching_strategy(text, deanonymizer_mapping)
for original_value in [
"Slim Shady",
"313-666-7440",
"real.slim.shady@gmail.com",
"4916 0387 9536 0861",
]:
assert original_value in deanonymized_text
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_best_matching_strategy() -> None:
"""
Test exact matching strategy for deanonymization.
"""
from langchain_experimental.data_anonymizer import (
deanonymizer_matching_strategies as dms,
)
deanonymizer_mapping = {
"PERSON": {"Maria Lynch": "Slim Shady"},
"PHONE_NUMBER": {"7344131647": "313-666-7440"},
"EMAIL_ADDRESS": {"wdavis@example.net": "real.slim.shady@gmail.com"},
"CREDIT_CARD": {"213186379402654": "4916 0387 9536 0861"},
}
# Changed some values:
# - "Maria Lynch" -> "Maria K. Lynch"
# - "7344131647" -> "734-413-1647"
# - "213186379402654" -> "2131 8637 9402 654"
# - "wdavis@example.net" -> the same to test exact match
text = (
"Are you Maria K. Lynch? I found your card with number 2131 8637 9402 654. "
"Is this your phone number: 734-413-1647?"
"Is this your email address: wdavis@example.net"
)
deanonymized_text = dms.combined_exact_fuzzy_matching_strategy(
text, deanonymizer_mapping
)
for original_value in [
"Slim Shady",
"313-666-7440",
"real.slim.shady@gmail.com",
"4916 0387 9536 0861",
]:
assert original_value in deanonymized_text
| 0 | 1,314 | [{"tag": "EMAIL", "value": "johnsmith@gmail.com", "start": 2153, "end": 2172}, {"tag": "EMAIL", "value": "johnsmith@gmail.com", "start": 2311, "end": 2330}, {"tag": "EMAIL", "value": "wdavis@example.net", "start": 6008, "end": 6026}, {"tag": "EMAIL", "value": "real.slim.shady@gmail.com", "start": 6030, "end": 6055}, {"tag": "EMAIL", "value": "wdavis@example.net", "start": 6312, "end": 6330}, {"tag": "EMAIL", "value": "real.slim.shady@gmail.com", "start": 6503, "end": 6528}, {"tag": "EMAIL", "value": "wdavis@example.net", "start": 7084, "end": 7102}, {"tag": "EMAIL", "value": "real.slim.shady@gmail.com", "start": 7106, "end": 7131}, {"tag": "EMAIL", "value": "wdavis@example.net", "start": 7376, "end": 7394}, {"tag": "EMAIL", "value": "wdavis@example.net", "start": 7614, "end": 7632}, {"tag": "EMAIL", "value": "real.slim.shady@gmail.com", "start": 7834, "end": 7859}] | true | 11 | from typing import Iterator, List
import pytest
from . import is_libcublas_available
@pytest.fixture(scope="module", autouse=True)
def check_spacy_model() -> Iterator[None]:
import spacy
if not spacy.util.is_package("en_core_web_lg"):
pytest.skip(reason="Spacy model 'en_core_web_lg' not installed")
yield
@pytest.fixture(scope="module", autouse=True)
def check_libcublas() -> Iterator[None]:
if not is_libcublas_available():
pytest.skip(reason="libcublas.so is not available")
yield
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
@pytest.mark.parametrize(
"analyzed_fields,should_contain",
[(["PERSON"], False), (["PHONE_NUMBER"], True), (None, False)],
)
def test_anonymize(analyzed_fields: List[str], should_contain: bool) -> None:
"""Test anonymizing a name in a simple sentence"""
from langchain_experimental.data_anonymizer import PresidioAnonymizer
text = "Hello, my name is John Doe."
anonymizer = PresidioAnonymizer(analyzed_fields=analyzed_fields)
anonymized_text = anonymizer.anonymize(text)
assert ("John Doe" in anonymized_text) == should_contain
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
@pytest.mark.parametrize(
"analyzed_fields,should_contain",
[(["PERSON"], True), (["PHONE_NUMBER"], True), (None, True)],
)
def test_anonymize_allow_list(analyzed_fields: List[str], should_contain: bool) -> None:
"""Test anonymizing a name in a simple sentence"""
from langchain_experimental.data_anonymizer import PresidioAnonymizer
text = "Hello, my name is John Doe."
anonymizer = PresidioAnonymizer(analyzed_fields=analyzed_fields)
anonymized_text = anonymizer.anonymize(text, allow_list=["John Doe"])
assert ("John Doe" in anonymized_text) == should_contain
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_anonymize_multiple() -> None:
"""Test anonymizing multiple items in a sentence"""
from langchain_experimental.data_anonymizer import PresidioAnonymizer
text = "John Smith's phone number is 313-666-7440 and email is hzdkv@example.com"
anonymizer = PresidioAnonymizer()
anonymized_text = anonymizer.anonymize(text)
for phrase in ["John Smith", "313-666-7440", "hzdkv@example.com"]:
assert phrase not in anonymized_text
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_check_instances() -> None:
"""Test anonymizing multiple items in a sentence"""
from langchain_experimental.data_anonymizer import PresidioAnonymizer
text = (
"This is John Smith. John Smith works in a bakery." "John Smith is a good guy"
)
anonymizer = PresidioAnonymizer(["PERSON"], faker_seed=42)
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text.count("Connie Lawrence") == 3
# New name should be generated
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text.count("Connie Lawrence") == 0
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_anonymize_with_custom_operator() -> None:
"""Test anonymize a name with a custom operator"""
from presidio_anonymizer.entities import OperatorConfig
from langchain_experimental.data_anonymizer import PresidioAnonymizer
custom_operator = {"PERSON": OperatorConfig("replace", {"new_value": "NAME"})}
anonymizer = PresidioAnonymizer(operators=custom_operator)
text = "Jane Doe was here."
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "NAME was here."
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_add_recognizer_operator() -> None:
"""
Test add recognizer and anonymize a new type of entity and with a custom operator
"""
from presidio_analyzer import PatternRecognizer
from presidio_anonymizer.entities import OperatorConfig
from langchain_experimental.data_anonymizer import PresidioAnonymizer
anonymizer = PresidioAnonymizer(analyzed_fields=[])
titles_list = ["Sir", "Madam", "Professor"]
custom_recognizer = PatternRecognizer(
supported_entity="TITLE", deny_list=titles_list
)
anonymizer.add_recognizer(custom_recognizer)
# anonymizing with custom recognizer
text = "Madam Jane Doe was here."
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "<TITLE> Jane Doe was here."
# anonymizing with custom recognizer and operator
custom_operator = {"TITLE": OperatorConfig("replace", {"new_value": "Dear"})}
anonymizer.add_operators(custom_operator)
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "Dear Jane Doe was here."
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_non_faker_values() -> None:
"""Test anonymizing multiple items in a sentence without faker values"""
from langchain_experimental.data_anonymizer import PresidioAnonymizer
text = (
"My name is John Smith. Your name is Adam Smith. Her name is Jane Smith."
"Our names are: John Smith, Adam Smith, Jane Smith."
)
expected_result = (
"My name is <PERSON>. Your name is <PERSON_2>. Her name is <PERSON_3>."
"Our names are: <PERSON>, <PERSON_2>, <PERSON_3>."
)
anonymizer = PresidioAnonymizer(add_default_faker_operators=False)
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == expected_result
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_exact_matching_strategy() -> None:
"""
Test exact matching strategy for deanonymization.
"""
from langchain_experimental.data_anonymizer import (
deanonymizer_matching_strategies as dms,
)
deanonymizer_mapping = {
"PERSON": {"Maria Lynch": "Slim Shady"},
"PHONE_NUMBER": {"7344131647": "313-666-7440"},
"EMAIL_ADDRESS": {"nnheo@example.com": "envkt@example.com"},
"CREDIT_CARD": {"213186379402654": "4916 0387 9536 0861"},
}
text = (
"Are you Maria Lynch? I found your card with number 213186379402654. "
"Is this your phone number: 7344131647? "
"Is this your email address: nnheo@example.com"
)
deanonymized_text = dms.exact_matching_strategy(text, deanonymizer_mapping)
for original_value in [
"Slim Shady",
"313-666-7440",
"envkt@example.com",
"4916 0387 9536 0861",
]:
assert original_value in deanonymized_text
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_best_matching_strategy() -> None:
"""
Test exact matching strategy for deanonymization.
"""
from langchain_experimental.data_anonymizer import (
deanonymizer_matching_strategies as dms,
)
deanonymizer_mapping = {
"PERSON": {"Maria Lynch": "Slim Shady"},
"PHONE_NUMBER": {"7344131647": "313-666-7440"},
"EMAIL_ADDRESS": {"nnheo@example.com": "envkt@example.com"},
"CREDIT_CARD": {"213186379402654": "4916 0387 9536 0861"},
}
# Changed some values:
# - "Maria Lynch" -> "Maria K. Lynch"
# - "7344131647" -> "734-413-1647"
# - "213186379402654" -> "2131 8637 9402 654"
# - "nnheo@example.com" -> the same to test exact match
text = (
"Are you Maria K. Lynch? I found your card with number 2131 8637 9402 654. "
"Is this your phone number: 734-413-1647?"
"Is this your email address: nnheo@example.com"
)
deanonymized_text = dms.combined_exact_fuzzy_matching_strategy(
text, deanonymizer_mapping
)
for original_value in [
"Slim Shady",
"313-666-7440",
"envkt@example.com",
"4916 0387 9536 0861",
]:
assert original_value in deanonymized_text
| true | from typing import Iterator, List
import pytest
from . import is_libcublas_available
@pytest.fixture(scope="module", autouse=True)
def check_spacy_model() -> Iterator[None]:
import spacy
if not spacy.util.is_package("en_core_web_lg"):
pytest.skip(reason="Spacy model 'en_core_web_lg' not installed")
yield
@pytest.fixture(scope="module", autouse=True)
def check_libcublas() -> Iterator[None]:
if not is_libcublas_available():
pytest.skip(reason="libcublas.so is not available")
yield
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
@pytest.mark.parametrize(
"analyzed_fields,should_contain",
[(["PERSON"], False), (["PHONE_NUMBER"], True), (None, False)],
)
def test_anonymize(analyzed_fields: List[str], should_contain: bool) -> None:
"""Test anonymizing a name in a simple sentence"""
from langchain_experimental.data_anonymizer import PresidioAnonymizer
text = "Hello, my name is John Doe."
anonymizer = PresidioAnonymizer(analyzed_fields=analyzed_fields)
anonymized_text = anonymizer.anonymize(text)
assert ("John Doe" in anonymized_text) == should_contain
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
@pytest.mark.parametrize(
"analyzed_fields,should_contain",
[(["PERSON"], True), (["PHONE_NUMBER"], True), (None, True)],
)
def test_anonymize_allow_list(analyzed_fields: List[str], should_contain: bool) -> None:
"""Test anonymizing a name in a simple sentence"""
from langchain_experimental.data_anonymizer import PresidioAnonymizer
text = "Hello, my name is John Doe."
anonymizer = PresidioAnonymizer(analyzed_fields=analyzed_fields)
anonymized_text = anonymizer.anonymize(text, allow_list=["John Doe"])
assert ("John Doe" in anonymized_text) == should_contain
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_anonymize_multiple() -> None:
"""Test anonymizing multiple items in a sentence"""
from langchain_experimental.data_anonymizer import PresidioAnonymizer
text = "John Smith's phone number is 313-666-7440 and email is PI:EMAIL:hzdkv@example.comEND_PI"
anonymizer = PresidioAnonymizer()
anonymized_text = anonymizer.anonymize(text)
for phrase in ["John Smith", "313-666-7440", "PI:EMAIL:hzdkv@example.comEND_PI"]:
assert phrase not in anonymized_text
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_check_instances() -> None:
"""Test anonymizing multiple items in a sentence"""
from langchain_experimental.data_anonymizer import PresidioAnonymizer
text = (
"This is John Smith. John Smith works in a bakery." "John Smith is a good guy"
)
anonymizer = PresidioAnonymizer(["PERSON"], faker_seed=42)
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text.count("Connie Lawrence") == 3
# New name should be generated
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text.count("Connie Lawrence") == 0
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_anonymize_with_custom_operator() -> None:
"""Test anonymize a name with a custom operator"""
from presidio_anonymizer.entities import OperatorConfig
from langchain_experimental.data_anonymizer import PresidioAnonymizer
custom_operator = {"PERSON": OperatorConfig("replace", {"new_value": "NAME"})}
anonymizer = PresidioAnonymizer(operators=custom_operator)
text = "Jane Doe was here."
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "NAME was here."
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_add_recognizer_operator() -> None:
"""
Test add recognizer and anonymize a new type of entity and with a custom operator
"""
from presidio_analyzer import PatternRecognizer
from presidio_anonymizer.entities import OperatorConfig
from langchain_experimental.data_anonymizer import PresidioAnonymizer
anonymizer = PresidioAnonymizer(analyzed_fields=[])
titles_list = ["Sir", "Madam", "Professor"]
custom_recognizer = PatternRecognizer(
supported_entity="TITLE", deny_list=titles_list
)
anonymizer.add_recognizer(custom_recognizer)
# anonymizing with custom recognizer
text = "Madam Jane Doe was here."
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "<TITLE> Jane Doe was here."
# anonymizing with custom recognizer and operator
custom_operator = {"TITLE": OperatorConfig("replace", {"new_value": "Dear"})}
anonymizer.add_operators(custom_operator)
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "Dear Jane Doe was here."
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_non_faker_values() -> None:
"""Test anonymizing multiple items in a sentence without faker values"""
from langchain_experimental.data_anonymizer import PresidioAnonymizer
text = (
"My name is John Smith. Your name is Adam Smith. Her name is Jane Smith."
"Our names are: John Smith, Adam Smith, Jane Smith."
)
expected_result = (
"My name is <PERSON>. Your name is <PERSON_2>. Her name is <PERSON_3>."
"Our names are: <PERSON>, <PERSON_2>, <PERSON_3>."
)
anonymizer = PresidioAnonymizer(add_default_faker_operators=False)
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == expected_result
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_exact_matching_strategy() -> None:
"""
Test exact matching strategy for deanonymization.
"""
from langchain_experimental.data_anonymizer import (
deanonymizer_matching_strategies as dms,
)
deanonymizer_mapping = {
"PERSON": {"Maria Lynch": "Slim Shady"},
"PHONE_NUMBER": {"7344131647": "313-666-7440"},
"EMAIL_ADDRESS": {"PI:EMAIL:nnheo@example.comEND_PI": "PI:EMAIL:envkt@example.comEND_PI"},
"CREDIT_CARD": {"213186379402654": "4916 0387 9536 0861"},
}
text = (
"Are you Maria Lynch? I found your card with number 213186379402654. "
"Is this your phone number: 7344131647? "
"Is this your email address: PI:EMAIL:nnheo@example.comEND_PI"
)
deanonymized_text = dms.exact_matching_strategy(text, deanonymizer_mapping)
for original_value in [
"Slim Shady",
"313-666-7440",
"PI:EMAIL:envkt@example.comEND_PI",
"4916 0387 9536 0861",
]:
assert original_value in deanonymized_text
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_best_matching_strategy() -> None:
"""
Test exact matching strategy for deanonymization.
"""
from langchain_experimental.data_anonymizer import (
deanonymizer_matching_strategies as dms,
)
deanonymizer_mapping = {
"PERSON": {"Maria Lynch": "Slim Shady"},
"PHONE_NUMBER": {"7344131647": "313-666-7440"},
"EMAIL_ADDRESS": {"PI:EMAIL:nnheo@example.comEND_PI": "PI:EMAIL:envkt@example.comEND_PI"},
"CREDIT_CARD": {"213186379402654": "4916 0387 9536 0861"},
}
# Changed some values:
# - "Maria Lynch" -> "Maria K. Lynch"
# - "7344131647" -> "734-413-1647"
# - "213186379402654" -> "2131 8637 9402 654"
# - "PI:EMAIL:nnheo@example.comEND_PI" -> the same to test exact match
text = (
"Are you Maria K. Lynch? I found your card with number 2131 8637 9402 654. "
"Is this your phone number: 734-413-1647?"
"Is this your email address: PI:EMAIL:nnheo@example.comEND_PI"
)
deanonymized_text = dms.combined_exact_fuzzy_matching_strategy(
text, deanonymizer_mapping
)
for original_value in [
"Slim Shady",
"313-666-7440",
"PI:EMAIL:envkt@example.comEND_PI",
"4916 0387 9536 0861",
]:
assert original_value in deanonymized_text
|
hf_public_repos/langchain-ai/langchain/docs/docs/integrations | hf_public_repos/langchain-ai/langchain/docs/docs/integrations/memory/sql_chat_message_history.ipynb | from langchain.memory.chat_message_histories import SQLChatMessageHistory
chat_message_history = SQLChatMessageHistory(
session_id="test_session", connection_string="sqlite:///sqlite.db"
)
chat_message_history.add_user_message("Hello")
chat_message_history.add_ai_message("Hi")chat_message_history.messagesfrom datetime import datetime
from langchain.schema import BaseMessage, HumanMessage, AIMessage, SystemMessage
from typing import Any
from sqlalchemy import Column, Integer, Text, DateTime
from sqlalchemy.orm import declarative_base
from langchain.memory.chat_message_histories.sql import BaseMessageConverter
Base = declarative_base()
class CustomMessage(Base):
__tablename__ = "custom_message_store"
id = Column(Integer, primary_key=True)
session_id = Column(Text)
type = Column(Text)
content = Column(Text)
created_at = Column(DateTime)
author_email = Column(Text)
class CustomMessageConverter(BaseMessageConverter):
def __init__(self, author_email: str):
self.author_email = author_email
def from_sql_model(self, sql_message: Any) -> BaseMessage:
if sql_message.type == "human":
return HumanMessage(
content=sql_message.content,
)
elif sql_message.type == "ai":
return AIMessage(
content=sql_message.content,
)
elif sql_message.type == "system":
return SystemMessage(
content=sql_message.content,
)
else:
raise ValueError(f"Unknown message type: {sql_message.type}")
def to_sql_model(self, message: BaseMessage, session_id: str) -> Any:
now = datetime.now()
return CustomMessage(
session_id=session_id,
type=message.type,
content=message.content,
created_at=now,
author_email=self.author_email,
)
def get_sql_model_class(self) -> Any:
return CustomMessage
chat_message_history = SQLChatMessageHistory(
session_id="test_session",
connection_string="sqlite:///sqlite.db",
custom_message_converter=CustomMessageConverter(author_email="test@example.com"),
)
chat_message_history.add_user_message("Hello")
chat_message_history.add_ai_message("Hi")chat_message_history.messages | 0 | 3,696 | [{"tag": "EMAIL", "value": "test@example.com", "start": 2180, "end": 2196}] | true | 1 | from langchain.memory.chat_message_histories import SQLChatMessageHistory
chat_message_history = SQLChatMessageHistory(
session_id="test_session", connection_string="sqlite:///sqlite.db"
)
chat_message_history.add_user_message("Hello")
chat_message_history.add_ai_message("Hi")chat_message_history.messagesfrom datetime import datetime
from langchain.schema import BaseMessage, HumanMessage, AIMessage, SystemMessage
from typing import Any
from sqlalchemy import Column, Integer, Text, DateTime
from sqlalchemy.orm import declarative_base
from langchain.memory.chat_message_histories.sql import BaseMessageConverter
Base = declarative_base()
class CustomMessage(Base):
__tablename__ = "custom_message_store"
id = Column(Integer, primary_key=True)
session_id = Column(Text)
type = Column(Text)
content = Column(Text)
created_at = Column(DateTime)
author_email = Column(Text)
class CustomMessageConverter(BaseMessageConverter):
def __init__(self, author_email: str):
self.author_email = author_email
def from_sql_model(self, sql_message: Any) -> BaseMessage:
if sql_message.type == "human":
return HumanMessage(
content=sql_message.content,
)
elif sql_message.type == "ai":
return AIMessage(
content=sql_message.content,
)
elif sql_message.type == "system":
return SystemMessage(
content=sql_message.content,
)
else:
raise ValueError(f"Unknown message type: {sql_message.type}")
def to_sql_model(self, message: BaseMessage, session_id: str) -> Any:
now = datetime.now()
return CustomMessage(
session_id=session_id,
type=message.type,
content=message.content,
created_at=now,
author_email=self.author_email,
)
def get_sql_model_class(self) -> Any:
return CustomMessage
chat_message_history = SQLChatMessageHistory(
session_id="test_session",
connection_string="sqlite:///sqlite.db",
custom_message_converter=CustomMessageConverter(author_email="nnheo@example.com"),
)
chat_message_history.add_user_message("Hello")
chat_message_history.add_ai_message("Hi")chat_message_history.messages | true | from langchain.memory.chat_message_histories import SQLChatMessageHistory
chat_message_history = SQLChatMessageHistory(
session_id="test_session", connection_string="sqlite:///sqlite.db"
)
chat_message_history.add_user_message("Hello")
chat_message_history.add_ai_message("Hi")chat_message_history.messagesfrom datetime import datetime
from langchain.schema import BaseMessage, HumanMessage, AIMessage, SystemMessage
from typing import Any
from sqlalchemy import Column, Integer, Text, DateTime
from sqlalchemy.orm import declarative_base
from langchain.memory.chat_message_histories.sql import BaseMessageConverter
Base = declarative_base()
class CustomMessage(Base):
__tablename__ = "custom_message_store"
id = Column(Integer, primary_key=True)
session_id = Column(Text)
type = Column(Text)
content = Column(Text)
created_at = Column(DateTime)
author_email = Column(Text)
class CustomMessageConverter(BaseMessageConverter):
def __init__(self, author_email: str):
self.author_email = author_email
def from_sql_model(self, sql_message: Any) -> BaseMessage:
if sql_message.type == "human":
return HumanMessage(
content=sql_message.content,
)
elif sql_message.type == "ai":
return AIMessage(
content=sql_message.content,
)
elif sql_message.type == "system":
return SystemMessage(
content=sql_message.content,
)
else:
raise ValueError(f"Unknown message type: {sql_message.type}")
def to_sql_model(self, message: BaseMessage, session_id: str) -> Any:
now = datetime.now()
return CustomMessage(
session_id=session_id,
type=message.type,
content=message.content,
created_at=now,
author_email=self.author_email,
)
def get_sql_model_class(self) -> Any:
return CustomMessage
chat_message_history = SQLChatMessageHistory(
session_id="test_session",
connection_string="sqlite:///sqlite.db",
custom_message_converter=CustomMessageConverter(author_email="PI:EMAIL:nnheo@example.comEND_PI"),
)
chat_message_history.add_user_message("Hello")
chat_message_history.add_ai_message("Hi")chat_message_history.messages |
hf_public_repos/langchain-ai/langchain/libs/langchain/langchain/tools | hf_public_repos/langchain-ai/langchain/libs/langchain/langchain/tools/office365/messages_search.py | """Util that Searches email messages in Office 365.
Free, but setup is required. See link below.
https://learn.microsoft.com/en-us/graph/auth/
"""
from typing import Any, Dict, List, Optional, Type
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.pydantic_v1 import BaseModel, Extra, Field
from langchain.tools.office365.base import O365BaseTool
from langchain.tools.office365.utils import clean_body
class SearchEmailsInput(BaseModel):
"""Input for SearchEmails Tool."""
"""From https://learn.microsoft.com/en-us/graph/search-query-parameter"""
folder: str = Field(
default=None,
description=(
" If the user wants to search in only one folder, the name of the folder. "
'Default folders are "inbox", "drafts", "sent items", "deleted ttems", but '
"users can search custom folders as well."
),
)
query: str = Field(
description=(
"The Microsoift Graph v1.0 $search query. Example filters include "
"from:sender, from:sender, to:recipient, subject:subject, "
"recipients:list_of_recipients, body:excitement, importance:high, "
"received>2022-12-01, received<2021-12-01, sent>2022-12-01, "
"sent<2021-12-01, hasAttachments:true attachment:api-catalog.md, "
"cc:samanthab@contoso.com, bcc:samanthab@contoso.com, body:excitement date "
"range example: received:2023-06-08..2023-06-09 matching example: "
"from:amy OR from:david."
)
)
max_results: int = Field(
default=10,
description="The maximum number of results to return.",
)
truncate: bool = Field(
default=True,
description=(
"Whether the email body is truncated to meet token number limits. Set to "
"False for searches that will retrieve very few results, otherwise, set to "
"True"
),
)
class O365SearchEmails(O365BaseTool):
"""Class for searching email messages in Office 365
Free, but setup is required
"""
name: str = "messages_search"
args_schema: Type[BaseModel] = SearchEmailsInput
description: str = (
"Use this tool to search for email messages."
" The input must be a valid Microsoft Graph v1.0 $search query."
" The output is a JSON list of the requested resource."
)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _run(
self,
query: str,
folder: str = "",
max_results: int = 10,
truncate: bool = True,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> List[Dict[str, Any]]:
# Get mailbox object
mailbox = self.account.mailbox()
# Pull the folder if the user wants to search in a folder
if folder != "":
mailbox = mailbox.get_folder(folder_name=folder)
# Retrieve messages based on query
query = mailbox.q().search(query)
messages = mailbox.get_messages(limit=max_results, query=query)
# Generate output dict
output_messages = []
for message in messages:
output_message = {}
output_message["from"] = message.sender
if truncate:
output_message["body"] = message.body_preview
else:
output_message["body"] = clean_body(message.body)
output_message["subject"] = message.subject
output_message["date"] = message.modified.strftime("%Y-%m-%dT%H:%M:%S%z")
output_message["to"] = []
for recipient in message.to._recipients:
output_message["to"].append(str(recipient))
output_message["cc"] = []
for recipient in message.cc._recipients:
output_message["cc"].append(str(recipient))
output_message["bcc"] = []
for recipient in message.bcc._recipients:
output_message["bcc"].append(str(recipient))
output_messages.append(output_message)
return output_messages
| 0 | 2,611 | [{"tag": "EMAIL", "value": "samanthab@contoso.com", "start": 1359, "end": 1380}, {"tag": "EMAIL", "value": "samanthab@contoso.com", "start": 1386, "end": 1407}] | true | 2 | """Util that Searches email messages in Office 365.
Free, but setup is required. See link below.
https://learn.microsoft.com/en-us/graph/auth/
"""
from typing import Any, Dict, List, Optional, Type
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.pydantic_v1 import BaseModel, Extra, Field
from langchain.tools.office365.base import O365BaseTool
from langchain.tools.office365.utils import clean_body
class SearchEmailsInput(BaseModel):
"""Input for SearchEmails Tool."""
"""From https://learn.microsoft.com/en-us/graph/search-query-parameter"""
folder: str = Field(
default=None,
description=(
" If the user wants to search in only one folder, the name of the folder. "
'Default folders are "inbox", "drafts", "sent items", "deleted ttems", but '
"users can search custom folders as well."
),
)
query: str = Field(
description=(
"The Microsoift Graph v1.0 $search query. Example filters include "
"from:sender, from:sender, to:recipient, subject:subject, "
"recipients:list_of_recipients, body:excitement, importance:high, "
"received>2022-12-01, received<2021-12-01, sent>2022-12-01, "
"sent<2021-12-01, hasAttachments:true attachment:api-catalog.md, "
"cc:envkt@example.com, bcc:envkt@example.com, body:excitement date "
"range example: received:2023-06-08..2023-06-09 matching example: "
"from:amy OR from:david."
)
)
max_results: int = Field(
default=10,
description="The maximum number of results to return.",
)
truncate: bool = Field(
default=True,
description=(
"Whether the email body is truncated to meet token number limits. Set to "
"False for searches that will retrieve very few results, otherwise, set to "
"True"
),
)
class O365SearchEmails(O365BaseTool):
"""Class for searching email messages in Office 365
Free, but setup is required
"""
name: str = "messages_search"
args_schema: Type[BaseModel] = SearchEmailsInput
description: str = (
"Use this tool to search for email messages."
" The input must be a valid Microsoft Graph v1.0 $search query."
" The output is a JSON list of the requested resource."
)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _run(
self,
query: str,
folder: str = "",
max_results: int = 10,
truncate: bool = True,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> List[Dict[str, Any]]:
# Get mailbox object
mailbox = self.account.mailbox()
# Pull the folder if the user wants to search in a folder
if folder != "":
mailbox = mailbox.get_folder(folder_name=folder)
# Retrieve messages based on query
query = mailbox.q().search(query)
messages = mailbox.get_messages(limit=max_results, query=query)
# Generate output dict
output_messages = []
for message in messages:
output_message = {}
output_message["from"] = message.sender
if truncate:
output_message["body"] = message.body_preview
else:
output_message["body"] = clean_body(message.body)
output_message["subject"] = message.subject
output_message["date"] = message.modified.strftime("%Y-%m-%dT%H:%M:%S%z")
output_message["to"] = []
for recipient in message.to._recipients:
output_message["to"].append(str(recipient))
output_message["cc"] = []
for recipient in message.cc._recipients:
output_message["cc"].append(str(recipient))
output_message["bcc"] = []
for recipient in message.bcc._recipients:
output_message["bcc"].append(str(recipient))
output_messages.append(output_message)
return output_messages
| true | """Util that Searches email messages in Office 365.
Free, but setup is required. See link below.
https://learn.microsoft.com/en-us/graph/auth/
"""
from typing import Any, Dict, List, Optional, Type
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.pydantic_v1 import BaseModel, Extra, Field
from langchain.tools.office365.base import O365BaseTool
from langchain.tools.office365.utils import clean_body
class SearchEmailsInput(BaseModel):
"""Input for SearchEmails Tool."""
"""From https://learn.microsoft.com/en-us/graph/search-query-parameter"""
folder: str = Field(
default=None,
description=(
" If the user wants to search in only one folder, the name of the folder. "
'Default folders are "inbox", "drafts", "sent items", "deleted ttems", but '
"users can search custom folders as well."
),
)
query: str = Field(
description=(
"The Microsoift Graph v1.0 $search query. Example filters include "
"from:sender, from:sender, to:recipient, subject:subject, "
"recipients:list_of_recipients, body:excitement, importance:high, "
"received>2022-12-01, received<2021-12-01, sent>2022-12-01, "
"sent<2021-12-01, hasAttachments:true attachment:api-catalog.md, "
"cc:PI:EMAIL:envkt@example.comEND_PI, bcc:PI:EMAIL:envkt@example.comEND_PI, body:excitement date "
"range example: received:2023-06-08..2023-06-09 matching example: "
"from:amy OR from:david."
)
)
max_results: int = Field(
default=10,
description="The maximum number of results to return.",
)
truncate: bool = Field(
default=True,
description=(
"Whether the email body is truncated to meet token number limits. Set to "
"False for searches that will retrieve very few results, otherwise, set to "
"True"
),
)
class O365SearchEmails(O365BaseTool):
"""Class for searching email messages in Office 365
Free, but setup is required
"""
name: str = "messages_search"
args_schema: Type[BaseModel] = SearchEmailsInput
description: str = (
"Use this tool to search for email messages."
" The input must be a valid Microsoft Graph v1.0 $search query."
" The output is a JSON list of the requested resource."
)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _run(
self,
query: str,
folder: str = "",
max_results: int = 10,
truncate: bool = True,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> List[Dict[str, Any]]:
# Get mailbox object
mailbox = self.account.mailbox()
# Pull the folder if the user wants to search in a folder
if folder != "":
mailbox = mailbox.get_folder(folder_name=folder)
# Retrieve messages based on query
query = mailbox.q().search(query)
messages = mailbox.get_messages(limit=max_results, query=query)
# Generate output dict
output_messages = []
for message in messages:
output_message = {}
output_message["from"] = message.sender
if truncate:
output_message["body"] = message.body_preview
else:
output_message["body"] = clean_body(message.body)
output_message["subject"] = message.subject
output_message["date"] = message.modified.strftime("%Y-%m-%dT%H:%M:%S%z")
output_message["to"] = []
for recipient in message.to._recipients:
output_message["to"].append(str(recipient))
output_message["cc"] = []
for recipient in message.cc._recipients:
output_message["cc"].append(str(recipient))
output_message["bcc"] = []
for recipient in message.bcc._recipients:
output_message["bcc"].append(str(recipient))
output_messages.append(output_message)
return output_messages
|
hf_public_repos/eosphoros-ai | hf_public_repos/eosphoros-ai/DB-GPT/setup.py | from typing import List, Tuple
import setuptools
import platform
import subprocess
import os
from enum import Enum
import urllib.request
from urllib.parse import urlparse, quote
import re
import shutil
from setuptools import find_packages
with open("README.md", mode="r", encoding="utf-8") as fh:
long_description = fh.read()
BUILD_NO_CACHE = os.getenv("BUILD_NO_CACHE", "true").lower() == "true"
LLAMA_CPP_GPU_ACCELERATION = (
os.getenv("LLAMA_CPP_GPU_ACCELERATION", "true").lower() == "true"
)
def parse_requirements(file_name: str) -> List[str]:
with open(file_name) as f:
return [
require.strip()
for require in f
if require.strip() and not require.startswith("#")
]
def get_latest_version(package_name: str, index_url: str, default_version: str):
command = [
"python",
"-m",
"pip",
"index",
"versions",
package_name,
"--index-url",
index_url,
]
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if result.returncode != 0:
print("Error executing command.")
print(result.stderr.decode())
return default_version
output = result.stdout.decode()
lines = output.split("\n")
for line in lines:
if "Available versions:" in line:
available_versions = line.split(":")[1].strip()
latest_version = available_versions.split(",")[0].strip()
return latest_version
return default_version
def encode_url(package_url: str) -> str:
parsed_url = urlparse(package_url)
encoded_path = quote(parsed_url.path)
safe_url = parsed_url._replace(path=encoded_path).geturl()
return safe_url, parsed_url.path
def cache_package(package_url: str, package_name: str, is_windows: bool = False):
safe_url, parsed_url = encode_url(package_url)
if BUILD_NO_CACHE:
return safe_url
from pip._internal.utils.appdirs import user_cache_dir
filename = os.path.basename(parsed_url)
cache_dir = os.path.join(user_cache_dir("pip"), "http", "wheels", package_name)
os.makedirs(cache_dir, exist_ok=True)
local_path = os.path.join(cache_dir, filename)
if not os.path.exists(local_path):
temp_path = local_path + ".tmp"
if os.path.exists(temp_path):
os.remove(temp_path)
try:
print(f"Download {safe_url} to {local_path}")
urllib.request.urlretrieve(safe_url, temp_path)
shutil.move(temp_path, local_path)
finally:
if os.path.exists(temp_path):
os.remove(temp_path)
return f"file:///{local_path}" if is_windows else f"file://{local_path}"
class SetupSpec:
def __init__(self) -> None:
self.extras: dict = {}
self.install_requires: List[str] = []
setup_spec = SetupSpec()
class AVXType(Enum):
BASIC = "basic"
AVX = "AVX"
AVX2 = "AVX2"
AVX512 = "AVX512"
@staticmethod
def of_type(avx: str):
for item in AVXType:
if item._value_ == avx:
return item
return None
class OSType(Enum):
WINDOWS = "win"
LINUX = "linux"
DARWIN = "darwin"
OTHER = "other"
def get_cpu_avx_support() -> Tuple[OSType, AVXType]:
system = platform.system()
os_type = OSType.OTHER
cpu_avx = AVXType.BASIC
env_cpu_avx = AVXType.of_type(os.getenv("DBGPT_LLAMA_CPP_AVX"))
if "windows" in system.lower():
os_type = OSType.WINDOWS
output = "avx2"
print("Current platform is windows, use avx2 as default cpu architecture")
elif system == "Linux":
os_type = OSType.LINUX
result = subprocess.run(
["lscpu"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
output = result.stdout.decode()
elif system == "Darwin":
os_type = OSType.DARWIN
result = subprocess.run(
["sysctl", "-a"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
output = result.stdout.decode()
else:
os_type = OSType.OTHER
print("Unsupported OS to get cpu avx, use default")
return os_type, env_cpu_avx if env_cpu_avx else cpu_avx
if "avx512" in output.lower():
cpu_avx = AVXType.AVX512
elif "avx2" in output.lower():
cpu_avx = AVXType.AVX2
elif "avx " in output.lower():
# cpu_avx = AVXType.AVX
pass
return os_type, env_cpu_avx if env_cpu_avx else cpu_avx
def get_cuda_version_from_torch():
try:
import torch
return torch.version.cuda
except:
return None
def get_cuda_version_from_nvcc():
try:
output = subprocess.check_output(["nvcc", "--version"])
version_line = [
line for line in output.decode("utf-8").split("\n") if "release" in line
][0]
return version_line.split("release")[-1].strip().split(",")[0]
except:
return None
def get_cuda_version_from_nvidia_smi():
try:
output = subprocess.check_output(["nvidia-smi"]).decode("utf-8")
match = re.search(r"CUDA Version:\s+(\d+\.\d+)", output)
if match:
return match.group(1)
else:
return None
except:
return None
def get_cuda_version() -> str:
try:
cuda_version = get_cuda_version_from_torch()
if not cuda_version:
cuda_version = get_cuda_version_from_nvcc()
if not cuda_version:
cuda_version = get_cuda_version_from_nvidia_smi()
return cuda_version
except Exception:
return None
def torch_requires(
torch_version: str = "2.0.1",
torchvision_version: str = "0.15.2",
torchaudio_version: str = "2.0.2",
):
torch_pkgs = [
f"torch=={torch_version}",
f"torchvision=={torchvision_version}",
f"torchaudio=={torchaudio_version}",
]
torch_cuda_pkgs = []
os_type, _ = get_cpu_avx_support()
if os_type != OSType.DARWIN:
cuda_version = get_cuda_version()
if cuda_version:
supported_versions = ["11.7", "11.8"]
if cuda_version not in supported_versions:
print(
f"PyTorch version {torch_version} supported cuda version: {supported_versions}, replace to {supported_versions[-1]}"
)
cuda_version = supported_versions[-1]
cuda_version = "cu" + cuda_version.replace(".", "")
py_version = "cp310"
os_pkg_name = "linux_x86_64" if os_type == OSType.LINUX else "win_amd64"
torch_url = f"https://download.pytorch.org/whl/{cuda_version}/torch-{torch_version}+{cuda_version}-{py_version}-{py_version}-{os_pkg_name}.whl"
torchvision_url = f"https://download.pytorch.org/whl/{cuda_version}/torchvision-{torchvision_version}+{cuda_version}-{py_version}-{py_version}-{os_pkg_name}.whl"
torch_url_cached = cache_package(
torch_url, "torch", os_type == OSType.WINDOWS
)
torchvision_url_cached = cache_package(
torchvision_url, "torchvision", os_type == OSType.WINDOWS
)
torch_cuda_pkgs = [
f"torch @ {torch_url_cached}",
f"torchvision @ {torchvision_url_cached}",
f"torchaudio=={torchaudio_version}",
]
setup_spec.extras["torch"] = torch_pkgs
setup_spec.extras["torch_cpu"] = torch_pkgs
setup_spec.extras["torch_cuda"] = torch_cuda_pkgs
def llama_cpp_python_cuda_requires():
cuda_version = get_cuda_version()
device = "cpu"
if not cuda_version:
print("CUDA not support, use cpu version")
return
if not LLAMA_CPP_GPU_ACCELERATION:
print("Disable GPU acceleration")
return
# Supports GPU acceleration
device = "cu" + cuda_version.replace(".", "")
os_type, cpu_avx = get_cpu_avx_support()
print(f"OS: {os_type}, cpu avx: {cpu_avx}")
supported_os = [OSType.WINDOWS, OSType.LINUX]
if os_type not in supported_os:
print(
f"llama_cpp_python_cuda just support in os: {[r._value_ for r in supported_os]}"
)
return
cpu_device = ""
if cpu_avx == AVXType.AVX2 or cpu_avx == AVXType.AVX512:
cpu_device = "avx"
else:
cpu_device = "basic"
device += cpu_device
base_url = "https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui"
llama_cpp_version = "0.2.10"
py_version = "cp310"
os_pkg_name = "manylinux_2_31_x86_64" if os_type == OSType.LINUX else "win_amd64"
extra_index_url = f"{base_url}/llama_cpp_python_cuda-{llama_cpp_version}+{device}-{py_version}-{py_version}-{os_pkg_name}.whl"
extra_index_url, _ = encode_url(extra_index_url)
print(f"Install llama_cpp_python_cuda from {extra_index_url}")
setup_spec.extras["llama_cpp"].append(f"llama_cpp_python_cuda @ {extra_index_url}")
def core_requires():
"""
pip install db-gpt or pip install "db-gpt[core]"
"""
setup_spec.extras["core"] = [
"aiohttp==3.8.4",
"chardet==5.1.0",
"importlib-resources==5.12.0",
"psutil==5.9.4",
"python-dotenv==1.0.0",
"colorama==0.4.6",
"prettytable",
"cachetools",
]
setup_spec.extras["framework"] = [
"fschat",
"coloredlogs",
"httpx",
"sqlparse==0.4.4",
"seaborn",
# https://github.com/eosphoros-ai/DB-GPT/issues/551
"pandas==2.0.3",
"auto-gpt-plugin-template",
"gTTS==2.3.1",
"langchain>=0.0.286",
"SQLAlchemy==2.0.22",
"fastapi==0.98.0",
"pymysql",
"duckdb==0.8.1",
"duckdb-engine",
"jsonschema",
# TODO move transformers to default
"transformers>=4.31.0",
"alembic==1.12.0",
# for excel
"openpyxl",
]
def knowledge_requires():
"""
pip install "db-gpt[knowledge]"
"""
setup_spec.extras["knowledge"] = [
"spacy==3.5.3",
"chromadb==0.4.10",
"markdown",
"bs4",
"python-pptx",
"python-docx",
"pypdf",
"python-multipart",
]
def llama_cpp_requires():
"""
pip install "db-gpt[llama_cpp]"
"""
setup_spec.extras["llama_cpp"] = ["llama-cpp-python"]
llama_cpp_python_cuda_requires()
def quantization_requires():
pkgs = []
os_type, _ = get_cpu_avx_support()
if os_type != OSType.WINDOWS:
pkgs = ["bitsandbytes"]
else:
latest_version = get_latest_version(
"bitsandbytes",
"https://jllllll.github.io/bitsandbytes-windows-webui",
"0.41.1",
)
extra_index_url = f"https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-{latest_version}-py3-none-win_amd64.whl"
local_pkg = cache_package(
extra_index_url, "bitsandbytes", os_type == OSType.WINDOWS
)
pkgs = [f"bitsandbytes @ {local_pkg}"]
print(pkgs)
# For chatglm2-6b-int4
pkgs += ["cpm_kernels"]
setup_spec.extras["quantization"] = pkgs
def all_vector_store_requires():
"""
pip install "db-gpt[vstore]"
"""
setup_spec.extras["vstore"] = [
"grpcio==1.47.5", # maybe delete it
"pymilvus==2.2.1",
"weaviate-client",
]
def all_datasource_requires():
"""
pip install "db-gpt[datasource]"
"""
setup_spec.extras["datasource"] = ["pymssql", "pymysql", "pyspark", "psycopg2"]
def openai_requires():
"""
pip install "db-gpt[openai]"
"""
setup_spec.extras["openai"] = ["openai", "tiktoken"]
setup_spec.extras["openai"] += setup_spec.extras["framework"]
setup_spec.extras["openai"] += setup_spec.extras["knowledge"]
def gpt4all_requires():
"""
pip install "db-gpt[gpt4all]"
"""
setup_spec.extras["gpt4all"] = ["gpt4all"]
def vllm_requires():
"""
pip install "db-gpt[vllm]"
"""
setup_spec.extras["vllm"] = ["vllm"]
# def chat_scene():
# setup_spec.extras["chat"] = [
# ""
# ]
def default_requires():
"""
pip install "db-gpt[default]"
"""
setup_spec.extras["default"] = [
"tokenizers==0.13.3",
"accelerate>=0.20.3",
"sentence-transformers",
"protobuf==3.20.3",
"zhipuai",
"dashscope",
"chardet",
"GitPython",
]
setup_spec.extras["default"] += setup_spec.extras["framework"]
setup_spec.extras["default"] += setup_spec.extras["knowledge"]
setup_spec.extras["default"] += setup_spec.extras["torch"]
setup_spec.extras["default"] += setup_spec.extras["quantization"]
def all_requires():
requires = set()
for _, pkgs in setup_spec.extras.items():
for pkg in pkgs:
requires.add(pkg)
setup_spec.extras["all"] = list(requires)
def init_install_requires():
setup_spec.install_requires += setup_spec.extras["core"]
print(f"Install requires: \n{','.join(setup_spec.install_requires)}")
core_requires()
torch_requires()
knowledge_requires()
llama_cpp_requires()
quantization_requires()
all_vector_store_requires()
all_datasource_requires()
openai_requires()
gpt4all_requires()
vllm_requires()
# must be last
default_requires()
all_requires()
init_install_requires()
setuptools.setup(
name="db-gpt",
packages=find_packages(exclude=("tests", "*.tests", "*.tests.*", "examples")),
version="0.4.1",
author="csunny",
author_email="cfqcsunny@gmail.com",
description="DB-GPT is an experimental open-source project that uses localized GPT large models to interact with your data and environment."
" With this solution, you can be assured that there is no risk of data leakage, and your data is 100% private and secure.",
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=setup_spec.install_requires,
url="https://github.com/eosphoros-ai/DB-GPT",
license="https://opensource.org/license/mit/",
python_requires=">=3.10",
extras_require=setup_spec.extras,
entry_points={
"console_scripts": [
"dbgpt=pilot.scripts.cli_scripts:main",
],
},
)
| 0 | 248 | [{"tag": "EMAIL", "value": "cfqcsunny@gmail.com", "start": 13595, "end": 13614}] | true | 1 | from typing import List, Tuple
import setuptools
import platform
import subprocess
import os
from enum import Enum
import urllib.request
from urllib.parse import urlparse, quote
import re
import shutil
from setuptools import find_packages
with open("README.md", mode="r", encoding="utf-8") as fh:
long_description = fh.read()
BUILD_NO_CACHE = os.getenv("BUILD_NO_CACHE", "true").lower() == "true"
LLAMA_CPP_GPU_ACCELERATION = (
os.getenv("LLAMA_CPP_GPU_ACCELERATION", "true").lower() == "true"
)
def parse_requirements(file_name: str) -> List[str]:
with open(file_name) as f:
return [
require.strip()
for require in f
if require.strip() and not require.startswith("#")
]
def get_latest_version(package_name: str, index_url: str, default_version: str):
command = [
"python",
"-m",
"pip",
"index",
"versions",
package_name,
"--index-url",
index_url,
]
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if result.returncode != 0:
print("Error executing command.")
print(result.stderr.decode())
return default_version
output = result.stdout.decode()
lines = output.split("\n")
for line in lines:
if "Available versions:" in line:
available_versions = line.split(":")[1].strip()
latest_version = available_versions.split(",")[0].strip()
return latest_version
return default_version
def encode_url(package_url: str) -> str:
parsed_url = urlparse(package_url)
encoded_path = quote(parsed_url.path)
safe_url = parsed_url._replace(path=encoded_path).geturl()
return safe_url, parsed_url.path
def cache_package(package_url: str, package_name: str, is_windows: bool = False):
safe_url, parsed_url = encode_url(package_url)
if BUILD_NO_CACHE:
return safe_url
from pip._internal.utils.appdirs import user_cache_dir
filename = os.path.basename(parsed_url)
cache_dir = os.path.join(user_cache_dir("pip"), "http", "wheels", package_name)
os.makedirs(cache_dir, exist_ok=True)
local_path = os.path.join(cache_dir, filename)
if not os.path.exists(local_path):
temp_path = local_path + ".tmp"
if os.path.exists(temp_path):
os.remove(temp_path)
try:
print(f"Download {safe_url} to {local_path}")
urllib.request.urlretrieve(safe_url, temp_path)
shutil.move(temp_path, local_path)
finally:
if os.path.exists(temp_path):
os.remove(temp_path)
return f"file:///{local_path}" if is_windows else f"file://{local_path}"
class SetupSpec:
def __init__(self) -> None:
self.extras: dict = {}
self.install_requires: List[str] = []
setup_spec = SetupSpec()
class AVXType(Enum):
BASIC = "basic"
AVX = "AVX"
AVX2 = "AVX2"
AVX512 = "AVX512"
@staticmethod
def of_type(avx: str):
for item in AVXType:
if item._value_ == avx:
return item
return None
class OSType(Enum):
WINDOWS = "win"
LINUX = "linux"
DARWIN = "darwin"
OTHER = "other"
def get_cpu_avx_support() -> Tuple[OSType, AVXType]:
system = platform.system()
os_type = OSType.OTHER
cpu_avx = AVXType.BASIC
env_cpu_avx = AVXType.of_type(os.getenv("DBGPT_LLAMA_CPP_AVX"))
if "windows" in system.lower():
os_type = OSType.WINDOWS
output = "avx2"
print("Current platform is windows, use avx2 as default cpu architecture")
elif system == "Linux":
os_type = OSType.LINUX
result = subprocess.run(
["lscpu"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
output = result.stdout.decode()
elif system == "Darwin":
os_type = OSType.DARWIN
result = subprocess.run(
["sysctl", "-a"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
output = result.stdout.decode()
else:
os_type = OSType.OTHER
print("Unsupported OS to get cpu avx, use default")
return os_type, env_cpu_avx if env_cpu_avx else cpu_avx
if "avx512" in output.lower():
cpu_avx = AVXType.AVX512
elif "avx2" in output.lower():
cpu_avx = AVXType.AVX2
elif "avx " in output.lower():
# cpu_avx = AVXType.AVX
pass
return os_type, env_cpu_avx if env_cpu_avx else cpu_avx
def get_cuda_version_from_torch():
try:
import torch
return torch.version.cuda
except:
return None
def get_cuda_version_from_nvcc():
try:
output = subprocess.check_output(["nvcc", "--version"])
version_line = [
line for line in output.decode("utf-8").split("\n") if "release" in line
][0]
return version_line.split("release")[-1].strip().split(",")[0]
except:
return None
def get_cuda_version_from_nvidia_smi():
try:
output = subprocess.check_output(["nvidia-smi"]).decode("utf-8")
match = re.search(r"CUDA Version:\s+(\d+\.\d+)", output)
if match:
return match.group(1)
else:
return None
except:
return None
def get_cuda_version() -> str:
try:
cuda_version = get_cuda_version_from_torch()
if not cuda_version:
cuda_version = get_cuda_version_from_nvcc()
if not cuda_version:
cuda_version = get_cuda_version_from_nvidia_smi()
return cuda_version
except Exception:
return None
def torch_requires(
torch_version: str = "2.0.1",
torchvision_version: str = "0.15.2",
torchaudio_version: str = "2.0.2",
):
torch_pkgs = [
f"torch=={torch_version}",
f"torchvision=={torchvision_version}",
f"torchaudio=={torchaudio_version}",
]
torch_cuda_pkgs = []
os_type, _ = get_cpu_avx_support()
if os_type != OSType.DARWIN:
cuda_version = get_cuda_version()
if cuda_version:
supported_versions = ["11.7", "11.8"]
if cuda_version not in supported_versions:
print(
f"PyTorch version {torch_version} supported cuda version: {supported_versions}, replace to {supported_versions[-1]}"
)
cuda_version = supported_versions[-1]
cuda_version = "cu" + cuda_version.replace(".", "")
py_version = "cp310"
os_pkg_name = "linux_x86_64" if os_type == OSType.LINUX else "win_amd64"
torch_url = f"https://download.pytorch.org/whl/{cuda_version}/torch-{torch_version}+{cuda_version}-{py_version}-{py_version}-{os_pkg_name}.whl"
torchvision_url = f"https://download.pytorch.org/whl/{cuda_version}/torchvision-{torchvision_version}+{cuda_version}-{py_version}-{py_version}-{os_pkg_name}.whl"
torch_url_cached = cache_package(
torch_url, "torch", os_type == OSType.WINDOWS
)
torchvision_url_cached = cache_package(
torchvision_url, "torchvision", os_type == OSType.WINDOWS
)
torch_cuda_pkgs = [
f"torch @ {torch_url_cached}",
f"torchvision @ {torchvision_url_cached}",
f"torchaudio=={torchaudio_version}",
]
setup_spec.extras["torch"] = torch_pkgs
setup_spec.extras["torch_cpu"] = torch_pkgs
setup_spec.extras["torch_cuda"] = torch_cuda_pkgs
def llama_cpp_python_cuda_requires():
cuda_version = get_cuda_version()
device = "cpu"
if not cuda_version:
print("CUDA not support, use cpu version")
return
if not LLAMA_CPP_GPU_ACCELERATION:
print("Disable GPU acceleration")
return
# Supports GPU acceleration
device = "cu" + cuda_version.replace(".", "")
os_type, cpu_avx = get_cpu_avx_support()
print(f"OS: {os_type}, cpu avx: {cpu_avx}")
supported_os = [OSType.WINDOWS, OSType.LINUX]
if os_type not in supported_os:
print(
f"llama_cpp_python_cuda just support in os: {[r._value_ for r in supported_os]}"
)
return
cpu_device = ""
if cpu_avx == AVXType.AVX2 or cpu_avx == AVXType.AVX512:
cpu_device = "avx"
else:
cpu_device = "basic"
device += cpu_device
base_url = "https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui"
llama_cpp_version = "0.2.10"
py_version = "cp310"
os_pkg_name = "manylinux_2_31_x86_64" if os_type == OSType.LINUX else "win_amd64"
extra_index_url = f"{base_url}/llama_cpp_python_cuda-{llama_cpp_version}+{device}-{py_version}-{py_version}-{os_pkg_name}.whl"
extra_index_url, _ = encode_url(extra_index_url)
print(f"Install llama_cpp_python_cuda from {extra_index_url}")
setup_spec.extras["llama_cpp"].append(f"llama_cpp_python_cuda @ {extra_index_url}")
def core_requires():
"""
pip install db-gpt or pip install "db-gpt[core]"
"""
setup_spec.extras["core"] = [
"aiohttp==3.8.4",
"chardet==5.1.0",
"importlib-resources==5.12.0",
"psutil==5.9.4",
"python-dotenv==1.0.0",
"colorama==0.4.6",
"prettytable",
"cachetools",
]
setup_spec.extras["framework"] = [
"fschat",
"coloredlogs",
"httpx",
"sqlparse==0.4.4",
"seaborn",
# https://github.com/eosphoros-ai/DB-GPT/issues/551
"pandas==2.0.3",
"auto-gpt-plugin-template",
"gTTS==2.3.1",
"langchain>=0.0.286",
"SQLAlchemy==2.0.22",
"fastapi==0.98.0",
"pymysql",
"duckdb==0.8.1",
"duckdb-engine",
"jsonschema",
# TODO move transformers to default
"transformers>=4.31.0",
"alembic==1.12.0",
# for excel
"openpyxl",
]
def knowledge_requires():
"""
pip install "db-gpt[knowledge]"
"""
setup_spec.extras["knowledge"] = [
"spacy==3.5.3",
"chromadb==0.4.10",
"markdown",
"bs4",
"python-pptx",
"python-docx",
"pypdf",
"python-multipart",
]
def llama_cpp_requires():
"""
pip install "db-gpt[llama_cpp]"
"""
setup_spec.extras["llama_cpp"] = ["llama-cpp-python"]
llama_cpp_python_cuda_requires()
def quantization_requires():
pkgs = []
os_type, _ = get_cpu_avx_support()
if os_type != OSType.WINDOWS:
pkgs = ["bitsandbytes"]
else:
latest_version = get_latest_version(
"bitsandbytes",
"https://jllllll.github.io/bitsandbytes-windows-webui",
"0.41.1",
)
extra_index_url = f"https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-{latest_version}-py3-none-win_amd64.whl"
local_pkg = cache_package(
extra_index_url, "bitsandbytes", os_type == OSType.WINDOWS
)
pkgs = [f"bitsandbytes @ {local_pkg}"]
print(pkgs)
# For chatglm2-6b-int4
pkgs += ["cpm_kernels"]
setup_spec.extras["quantization"] = pkgs
def all_vector_store_requires():
"""
pip install "db-gpt[vstore]"
"""
setup_spec.extras["vstore"] = [
"grpcio==1.47.5", # maybe delete it
"pymilvus==2.2.1",
"weaviate-client",
]
def all_datasource_requires():
"""
pip install "db-gpt[datasource]"
"""
setup_spec.extras["datasource"] = ["pymssql", "pymysql", "pyspark", "psycopg2"]
def openai_requires():
"""
pip install "db-gpt[openai]"
"""
setup_spec.extras["openai"] = ["openai", "tiktoken"]
setup_spec.extras["openai"] += setup_spec.extras["framework"]
setup_spec.extras["openai"] += setup_spec.extras["knowledge"]
def gpt4all_requires():
"""
pip install "db-gpt[gpt4all]"
"""
setup_spec.extras["gpt4all"] = ["gpt4all"]
def vllm_requires():
"""
pip install "db-gpt[vllm]"
"""
setup_spec.extras["vllm"] = ["vllm"]
# def chat_scene():
# setup_spec.extras["chat"] = [
# ""
# ]
def default_requires():
"""
pip install "db-gpt[default]"
"""
setup_spec.extras["default"] = [
"tokenizers==0.13.3",
"accelerate>=0.20.3",
"sentence-transformers",
"protobuf==3.20.3",
"zhipuai",
"dashscope",
"chardet",
"GitPython",
]
setup_spec.extras["default"] += setup_spec.extras["framework"]
setup_spec.extras["default"] += setup_spec.extras["knowledge"]
setup_spec.extras["default"] += setup_spec.extras["torch"]
setup_spec.extras["default"] += setup_spec.extras["quantization"]
def all_requires():
requires = set()
for _, pkgs in setup_spec.extras.items():
for pkg in pkgs:
requires.add(pkg)
setup_spec.extras["all"] = list(requires)
def init_install_requires():
setup_spec.install_requires += setup_spec.extras["core"]
print(f"Install requires: \n{','.join(setup_spec.install_requires)}")
core_requires()
torch_requires()
knowledge_requires()
llama_cpp_requires()
quantization_requires()
all_vector_store_requires()
all_datasource_requires()
openai_requires()
gpt4all_requires()
vllm_requires()
# must be last
default_requires()
all_requires()
init_install_requires()
setuptools.setup(
name="db-gpt",
packages=find_packages(exclude=("tests", "*.tests", "*.tests.*", "examples")),
version="0.4.1",
author="csunny",
author_email="dycjh@example.com",
description="DB-GPT is an experimental open-source project that uses localized GPT large models to interact with your data and environment."
" With this solution, you can be assured that there is no risk of data leakage, and your data is 100% private and secure.",
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=setup_spec.install_requires,
url="https://github.com/eosphoros-ai/DB-GPT",
license="https://opensource.org/license/mit/",
python_requires=">=3.10",
extras_require=setup_spec.extras,
entry_points={
"console_scripts": [
"dbgpt=pilot.scripts.cli_scripts:main",
],
},
)
| true | from typing import List, Tuple
import setuptools
import platform
import subprocess
import os
from enum import Enum
import urllib.request
from urllib.parse import urlparse, quote
import re
import shutil
from setuptools import find_packages
with open("README.md", mode="r", encoding="utf-8") as fh:
long_description = fh.read()
BUILD_NO_CACHE = os.getenv("BUILD_NO_CACHE", "true").lower() == "true"
LLAMA_CPP_GPU_ACCELERATION = (
os.getenv("LLAMA_CPP_GPU_ACCELERATION", "true").lower() == "true"
)
def parse_requirements(file_name: str) -> List[str]:
with open(file_name) as f:
return [
require.strip()
for require in f
if require.strip() and not require.startswith("#")
]
def get_latest_version(package_name: str, index_url: str, default_version: str):
command = [
"python",
"-m",
"pip",
"index",
"versions",
package_name,
"--index-url",
index_url,
]
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if result.returncode != 0:
print("Error executing command.")
print(result.stderr.decode())
return default_version
output = result.stdout.decode()
lines = output.split("\n")
for line in lines:
if "Available versions:" in line:
available_versions = line.split(":")[1].strip()
latest_version = available_versions.split(",")[0].strip()
return latest_version
return default_version
def encode_url(package_url: str) -> str:
parsed_url = urlparse(package_url)
encoded_path = quote(parsed_url.path)
safe_url = parsed_url._replace(path=encoded_path).geturl()
return safe_url, parsed_url.path
def cache_package(package_url: str, package_name: str, is_windows: bool = False):
safe_url, parsed_url = encode_url(package_url)
if BUILD_NO_CACHE:
return safe_url
from pip._internal.utils.appdirs import user_cache_dir
filename = os.path.basename(parsed_url)
cache_dir = os.path.join(user_cache_dir("pip"), "http", "wheels", package_name)
os.makedirs(cache_dir, exist_ok=True)
local_path = os.path.join(cache_dir, filename)
if not os.path.exists(local_path):
temp_path = local_path + ".tmp"
if os.path.exists(temp_path):
os.remove(temp_path)
try:
print(f"Download {safe_url} to {local_path}")
urllib.request.urlretrieve(safe_url, temp_path)
shutil.move(temp_path, local_path)
finally:
if os.path.exists(temp_path):
os.remove(temp_path)
return f"file:///{local_path}" if is_windows else f"file://{local_path}"
class SetupSpec:
def __init__(self) -> None:
self.extras: dict = {}
self.install_requires: List[str] = []
setup_spec = SetupSpec()
class AVXType(Enum):
BASIC = "basic"
AVX = "AVX"
AVX2 = "AVX2"
AVX512 = "AVX512"
@staticmethod
def of_type(avx: str):
for item in AVXType:
if item._value_ == avx:
return item
return None
class OSType(Enum):
WINDOWS = "win"
LINUX = "linux"
DARWIN = "darwin"
OTHER = "other"
def get_cpu_avx_support() -> Tuple[OSType, AVXType]:
system = platform.system()
os_type = OSType.OTHER
cpu_avx = AVXType.BASIC
env_cpu_avx = AVXType.of_type(os.getenv("DBGPT_LLAMA_CPP_AVX"))
if "windows" in system.lower():
os_type = OSType.WINDOWS
output = "avx2"
print("Current platform is windows, use avx2 as default cpu architecture")
elif system == "Linux":
os_type = OSType.LINUX
result = subprocess.run(
["lscpu"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
output = result.stdout.decode()
elif system == "Darwin":
os_type = OSType.DARWIN
result = subprocess.run(
["sysctl", "-a"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
output = result.stdout.decode()
else:
os_type = OSType.OTHER
print("Unsupported OS to get cpu avx, use default")
return os_type, env_cpu_avx if env_cpu_avx else cpu_avx
if "avx512" in output.lower():
cpu_avx = AVXType.AVX512
elif "avx2" in output.lower():
cpu_avx = AVXType.AVX2
elif "avx " in output.lower():
# cpu_avx = AVXType.AVX
pass
return os_type, env_cpu_avx if env_cpu_avx else cpu_avx
def get_cuda_version_from_torch():
try:
import torch
return torch.version.cuda
except:
return None
def get_cuda_version_from_nvcc():
try:
output = subprocess.check_output(["nvcc", "--version"])
version_line = [
line for line in output.decode("utf-8").split("\n") if "release" in line
][0]
return version_line.split("release")[-1].strip().split(",")[0]
except:
return None
def get_cuda_version_from_nvidia_smi():
try:
output = subprocess.check_output(["nvidia-smi"]).decode("utf-8")
match = re.search(r"CUDA Version:\s+(\d+\.\d+)", output)
if match:
return match.group(1)
else:
return None
except:
return None
def get_cuda_version() -> str:
try:
cuda_version = get_cuda_version_from_torch()
if not cuda_version:
cuda_version = get_cuda_version_from_nvcc()
if not cuda_version:
cuda_version = get_cuda_version_from_nvidia_smi()
return cuda_version
except Exception:
return None
def torch_requires(
torch_version: str = "2.0.1",
torchvision_version: str = "0.15.2",
torchaudio_version: str = "2.0.2",
):
torch_pkgs = [
f"torch=={torch_version}",
f"torchvision=={torchvision_version}",
f"torchaudio=={torchaudio_version}",
]
torch_cuda_pkgs = []
os_type, _ = get_cpu_avx_support()
if os_type != OSType.DARWIN:
cuda_version = get_cuda_version()
if cuda_version:
supported_versions = ["11.7", "11.8"]
if cuda_version not in supported_versions:
print(
f"PyTorch version {torch_version} supported cuda version: {supported_versions}, replace to {supported_versions[-1]}"
)
cuda_version = supported_versions[-1]
cuda_version = "cu" + cuda_version.replace(".", "")
py_version = "cp310"
os_pkg_name = "linux_x86_64" if os_type == OSType.LINUX else "win_amd64"
torch_url = f"https://download.pytorch.org/whl/{cuda_version}/torch-{torch_version}+{cuda_version}-{py_version}-{py_version}-{os_pkg_name}.whl"
torchvision_url = f"https://download.pytorch.org/whl/{cuda_version}/torchvision-{torchvision_version}+{cuda_version}-{py_version}-{py_version}-{os_pkg_name}.whl"
torch_url_cached = cache_package(
torch_url, "torch", os_type == OSType.WINDOWS
)
torchvision_url_cached = cache_package(
torchvision_url, "torchvision", os_type == OSType.WINDOWS
)
torch_cuda_pkgs = [
f"torch @ {torch_url_cached}",
f"torchvision @ {torchvision_url_cached}",
f"torchaudio=={torchaudio_version}",
]
setup_spec.extras["torch"] = torch_pkgs
setup_spec.extras["torch_cpu"] = torch_pkgs
setup_spec.extras["torch_cuda"] = torch_cuda_pkgs
def llama_cpp_python_cuda_requires():
cuda_version = get_cuda_version()
device = "cpu"
if not cuda_version:
print("CUDA not support, use cpu version")
return
if not LLAMA_CPP_GPU_ACCELERATION:
print("Disable GPU acceleration")
return
# Supports GPU acceleration
device = "cu" + cuda_version.replace(".", "")
os_type, cpu_avx = get_cpu_avx_support()
print(f"OS: {os_type}, cpu avx: {cpu_avx}")
supported_os = [OSType.WINDOWS, OSType.LINUX]
if os_type not in supported_os:
print(
f"llama_cpp_python_cuda just support in os: {[r._value_ for r in supported_os]}"
)
return
cpu_device = ""
if cpu_avx == AVXType.AVX2 or cpu_avx == AVXType.AVX512:
cpu_device = "avx"
else:
cpu_device = "basic"
device += cpu_device
base_url = "https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui"
llama_cpp_version = "0.2.10"
py_version = "cp310"
os_pkg_name = "manylinux_2_31_x86_64" if os_type == OSType.LINUX else "win_amd64"
extra_index_url = f"{base_url}/llama_cpp_python_cuda-{llama_cpp_version}+{device}-{py_version}-{py_version}-{os_pkg_name}.whl"
extra_index_url, _ = encode_url(extra_index_url)
print(f"Install llama_cpp_python_cuda from {extra_index_url}")
setup_spec.extras["llama_cpp"].append(f"llama_cpp_python_cuda @ {extra_index_url}")
def core_requires():
"""
pip install db-gpt or pip install "db-gpt[core]"
"""
setup_spec.extras["core"] = [
"aiohttp==3.8.4",
"chardet==5.1.0",
"importlib-resources==5.12.0",
"psutil==5.9.4",
"python-dotenv==1.0.0",
"colorama==0.4.6",
"prettytable",
"cachetools",
]
setup_spec.extras["framework"] = [
"fschat",
"coloredlogs",
"httpx",
"sqlparse==0.4.4",
"seaborn",
# https://github.com/eosphoros-ai/DB-GPT/issues/551
"pandas==2.0.3",
"auto-gpt-plugin-template",
"gTTS==2.3.1",
"langchain>=0.0.286",
"SQLAlchemy==2.0.22",
"fastapi==0.98.0",
"pymysql",
"duckdb==0.8.1",
"duckdb-engine",
"jsonschema",
# TODO move transformers to default
"transformers>=4.31.0",
"alembic==1.12.0",
# for excel
"openpyxl",
]
def knowledge_requires():
"""
pip install "db-gpt[knowledge]"
"""
setup_spec.extras["knowledge"] = [
"spacy==3.5.3",
"chromadb==0.4.10",
"markdown",
"bs4",
"python-pptx",
"python-docx",
"pypdf",
"python-multipart",
]
def llama_cpp_requires():
"""
pip install "db-gpt[llama_cpp]"
"""
setup_spec.extras["llama_cpp"] = ["llama-cpp-python"]
llama_cpp_python_cuda_requires()
def quantization_requires():
pkgs = []
os_type, _ = get_cpu_avx_support()
if os_type != OSType.WINDOWS:
pkgs = ["bitsandbytes"]
else:
latest_version = get_latest_version(
"bitsandbytes",
"https://jllllll.github.io/bitsandbytes-windows-webui",
"0.41.1",
)
extra_index_url = f"https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-{latest_version}-py3-none-win_amd64.whl"
local_pkg = cache_package(
extra_index_url, "bitsandbytes", os_type == OSType.WINDOWS
)
pkgs = [f"bitsandbytes @ {local_pkg}"]
print(pkgs)
# For chatglm2-6b-int4
pkgs += ["cpm_kernels"]
setup_spec.extras["quantization"] = pkgs
def all_vector_store_requires():
"""
pip install "db-gpt[vstore]"
"""
setup_spec.extras["vstore"] = [
"grpcio==1.47.5", # maybe delete it
"pymilvus==2.2.1",
"weaviate-client",
]
def all_datasource_requires():
"""
pip install "db-gpt[datasource]"
"""
setup_spec.extras["datasource"] = ["pymssql", "pymysql", "pyspark", "psycopg2"]
def openai_requires():
"""
pip install "db-gpt[openai]"
"""
setup_spec.extras["openai"] = ["openai", "tiktoken"]
setup_spec.extras["openai"] += setup_spec.extras["framework"]
setup_spec.extras["openai"] += setup_spec.extras["knowledge"]
def gpt4all_requires():
"""
pip install "db-gpt[gpt4all]"
"""
setup_spec.extras["gpt4all"] = ["gpt4all"]
def vllm_requires():
"""
pip install "db-gpt[vllm]"
"""
setup_spec.extras["vllm"] = ["vllm"]
# def chat_scene():
# setup_spec.extras["chat"] = [
# ""
# ]
def default_requires():
"""
pip install "db-gpt[default]"
"""
setup_spec.extras["default"] = [
"tokenizers==0.13.3",
"accelerate>=0.20.3",
"sentence-transformers",
"protobuf==3.20.3",
"zhipuai",
"dashscope",
"chardet",
"GitPython",
]
setup_spec.extras["default"] += setup_spec.extras["framework"]
setup_spec.extras["default"] += setup_spec.extras["knowledge"]
setup_spec.extras["default"] += setup_spec.extras["torch"]
setup_spec.extras["default"] += setup_spec.extras["quantization"]
def all_requires():
requires = set()
for _, pkgs in setup_spec.extras.items():
for pkg in pkgs:
requires.add(pkg)
setup_spec.extras["all"] = list(requires)
def init_install_requires():
setup_spec.install_requires += setup_spec.extras["core"]
print(f"Install requires: \n{','.join(setup_spec.install_requires)}")
core_requires()
torch_requires()
knowledge_requires()
llama_cpp_requires()
quantization_requires()
all_vector_store_requires()
all_datasource_requires()
openai_requires()
gpt4all_requires()
vllm_requires()
# must be last
default_requires()
all_requires()
init_install_requires()
setuptools.setup(
name="db-gpt",
packages=find_packages(exclude=("tests", "*.tests", "*.tests.*", "examples")),
version="0.4.1",
author="csunny",
author_email="PI:EMAIL:dycjh@example.comEND_PI",
description="DB-GPT is an experimental open-source project that uses localized GPT large models to interact with your data and environment."
" With this solution, you can be assured that there is no risk of data leakage, and your data is 100% private and secure.",
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=setup_spec.install_requires,
url="https://github.com/eosphoros-ai/DB-GPT",
license="https://opensource.org/license/mit/",
python_requires=">=3.10",
extras_require=setup_spec.extras,
entry_points={
"console_scripts": [
"dbgpt=pilot.scripts.cli_scripts:main",
],
},
)
|
hf_public_repos/zilliztech/GPTCache/tests | hf_public_repos/zilliztech/GPTCache/tests/integration_tests/test_redis_onnx.py | import random
from unittest.mock import patch
from gptcache import Cache
from gptcache.adapter import openai
from gptcache.adapter.api import init_similar_cache
from gptcache.embedding import Onnx
from gptcache.manager import manager_factory
from gptcache.processor.pre import last_content
from gptcache.utils.response import get_message_from_openai_answer
def test_redis_sqlite():
encoder = Onnx()
redis_data_managers = [
manager_factory(
"sqlite,redis",
data_dir=str(random.random()),
vector_params={"dimension": encoder.dimension},
),
manager_factory(
"redis,redis",
data_dir=str(random.random()),
scalar_params={"global_key_prefix": "gptcache_scalar"},
vector_params={"dimension": encoder.dimension, "namespace": "gptcache_vector", "collection_name": "cache_vector"},
)
]
for redis_data_manager in redis_data_managers:
redis_cache = Cache()
init_similar_cache(
cache_obj=redis_cache,
pre_func=last_content,
embedding=encoder,
data_manager=redis_data_manager,
)
question = "what's github"
expect_answer = "GitHub is an online platform used primarily for version control and coding collaborations."
with patch("openai.ChatCompletion.create") as mock_create:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_create.return_value = datas
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
cache_obj=redis_cache,
)
assert get_message_from_openai_answer(response) == expect_answer, response
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "can you explain what GitHub is"},
],
cache_obj=redis_cache,
)
answer_text = get_message_from_openai_answer(response)
assert answer_text == expect_answer, answer_text | 0 | 805 | [{"tag": "KEY", "value": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 1737, "end": 1775}] | true | 1 | import random
from unittest.mock import patch
from gptcache import Cache
from gptcache.adapter import openai
from gptcache.adapter.api import init_similar_cache
from gptcache.embedding import Onnx
from gptcache.manager import manager_factory
from gptcache.processor.pre import last_content
from gptcache.utils.response import get_message_from_openai_answer
def test_redis_sqlite():
encoder = Onnx()
redis_data_managers = [
manager_factory(
"sqlite,redis",
data_dir=str(random.random()),
vector_params={"dimension": encoder.dimension},
),
manager_factory(
"redis,redis",
data_dir=str(random.random()),
scalar_params={"global_key_prefix": "gptcache_scalar"},
vector_params={"dimension": encoder.dimension, "namespace": "gptcache_vector", "collection_name": "cache_vector"},
)
]
for redis_data_manager in redis_data_managers:
redis_cache = Cache()
init_similar_cache(
cache_obj=redis_cache,
pre_func=last_content,
embedding=encoder,
data_manager=redis_data_manager,
)
question = "what's github"
expect_answer = "GitHub is an online platform used primarily for version control and coding collaborations."
with patch("openai.ChatCompletion.create") as mock_create:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "caf86f4uutaoxfysmf7anj01xl6sv3ps",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_create.return_value = datas
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
cache_obj=redis_cache,
)
assert get_message_from_openai_answer(response) == expect_answer, response
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "can you explain what GitHub is"},
],
cache_obj=redis_cache,
)
answer_text = get_message_from_openai_answer(response)
assert answer_text == expect_answer, answer_text | true | import random
from unittest.mock import patch
from gptcache import Cache
from gptcache.adapter import openai
from gptcache.adapter.api import init_similar_cache
from gptcache.embedding import Onnx
from gptcache.manager import manager_factory
from gptcache.processor.pre import last_content
from gptcache.utils.response import get_message_from_openai_answer
def test_redis_sqlite():
encoder = Onnx()
redis_data_managers = [
manager_factory(
"sqlite,redis",
data_dir=str(random.random()),
vector_params={"dimension": encoder.dimension},
),
manager_factory(
"redis,redis",
data_dir=str(random.random()),
scalar_params={"global_key_prefix": "gptcache_scalar"},
vector_params={"dimension": encoder.dimension, "namespace": "gptcache_vector", "collection_name": "cache_vector"},
)
]
for redis_data_manager in redis_data_managers:
redis_cache = Cache()
init_similar_cache(
cache_obj=redis_cache,
pre_func=last_content,
embedding=encoder,
data_manager=redis_data_manager,
)
question = "what's github"
expect_answer = "GitHub is an online platform used primarily for version control and coding collaborations."
with patch("openai.ChatCompletion.create") as mock_create:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "PI:KEY:caf86f4uutaoxfysmf7anj01xl6sv3psEND_PI",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_create.return_value = datas
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
cache_obj=redis_cache,
)
assert get_message_from_openai_answer(response) == expect_answer, response
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "can you explain what GitHub is"},
],
cache_obj=redis_cache,
)
answer_text = get_message_from_openai_answer(response)
assert answer_text == expect_answer, answer_text |
hf_public_repos/langchain-ai/langchain/libs/langchain/langchain | hf_public_repos/langchain-ai/langchain/libs/langchain/langchain/utilities/zapier.py | """Util that can interact with Zapier NLA.
Full docs here: https://nla.zapier.com/start/
Note: this wrapper currently only implemented the `api_key` auth method for testing
and server-side production use cases (using the developer's connected accounts on
Zapier.com)
For use-cases where LangChain + Zapier NLA is powering a user-facing application, and
LangChain needs access to the end-user's connected accounts on Zapier.com, you'll need
to use oauth. Review the full docs above and reach out to nla@zapier.com for
developer support.
"""
import json
from typing import Any, Dict, List, Optional
import aiohttp
import requests
from requests import Request, Session
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
class ZapierNLAWrapper(BaseModel):
"""Wrapper for Zapier NLA.
Full docs here: https://nla.zapier.com/start/
This wrapper supports both API Key and OAuth Credential auth methods. API Key
is the fastest way to get started using this wrapper.
Call this wrapper with either `zapier_nla_api_key` or
`zapier_nla_oauth_access_token` arguments, or set the `ZAPIER_NLA_API_KEY`
environment variable. If both arguments are set, the Access Token will take
precedence.
For use-cases where LangChain + Zapier NLA is powering a user-facing application,
and LangChain needs access to the end-user's connected accounts on Zapier.com,
you'll need to use OAuth. Review the full docs above to learn how to create
your own provider and generate credentials.
"""
zapier_nla_api_key: str
zapier_nla_oauth_access_token: str
zapier_nla_api_base: str = "https://nla.zapier.com/api/v1/"
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _format_headers(self) -> Dict[str, str]:
"""Format headers for requests."""
headers = {
"Accept": "application/json",
"Content-Type": "application/json",
}
if self.zapier_nla_oauth_access_token:
headers.update(
{"Authorization": f"Bearer {self.zapier_nla_oauth_access_token}"}
)
else:
headers.update({"X-API-Key": self.zapier_nla_api_key})
return headers
def _get_session(self) -> Session:
session = requests.Session()
session.headers.update(self._format_headers())
return session
async def _arequest(self, method: str, url: str, **kwargs: Any) -> Dict[str, Any]:
"""Make an async request."""
async with aiohttp.ClientSession(headers=self._format_headers()) as session:
async with session.request(method, url, **kwargs) as response:
response.raise_for_status()
return await response.json()
def _create_action_payload( # type: ignore[no-untyped-def]
self, instructions: str, params: Optional[Dict] = None, preview_only=False
) -> Dict:
"""Create a payload for an action."""
data = params if params else {}
data.update(
{
"instructions": instructions,
}
)
if preview_only:
data.update({"preview_only": True})
return data
def _create_action_url(self, action_id: str) -> str:
"""Create a url for an action."""
return self.zapier_nla_api_base + f"exposed/{action_id}/execute/"
def _create_action_request( # type: ignore[no-untyped-def]
self,
action_id: str,
instructions: str,
params: Optional[Dict] = None,
preview_only=False,
) -> Request:
data = self._create_action_payload(instructions, params, preview_only)
return Request(
"POST",
self._create_action_url(action_id),
json=data,
)
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
zapier_nla_api_key_default = None
# If there is a oauth_access_key passed in the values
# we don't need a nla_api_key it can be blank
if "zapier_nla_oauth_access_token" in values:
zapier_nla_api_key_default = ""
else:
values["zapier_nla_oauth_access_token"] = ""
# we require at least one API Key
zapier_nla_api_key = get_from_dict_or_env(
values,
"zapier_nla_api_key",
"ZAPIER_NLA_API_KEY",
zapier_nla_api_key_default,
)
values["zapier_nla_api_key"] = zapier_nla_api_key
return values
async def alist(self) -> List[Dict]:
"""Returns a list of all exposed (enabled) actions associated with
current user (associated with the set api_key). Change your exposed
actions here: https://nla.zapier.com/demo/start/
The return list can be empty if no actions exposed. Else will contain
a list of action objects:
[{
"id": str,
"description": str,
"params": Dict[str, str]
}]
`params` will always contain an `instructions` key, the only required
param. All others optional and if provided will override any AI guesses
(see "understanding the AI guessing flow" here:
https://nla.zapier.com/api/v1/docs)
"""
response = await self._arequest("GET", self.zapier_nla_api_base + "exposed/")
return response["results"]
def list(self) -> List[Dict]:
"""Returns a list of all exposed (enabled) actions associated with
current user (associated with the set api_key). Change your exposed
actions here: https://nla.zapier.com/demo/start/
The return list can be empty if no actions exposed. Else will contain
a list of action objects:
[{
"id": str,
"description": str,
"params": Dict[str, str]
}]
`params` will always contain an `instructions` key, the only required
param. All others optional and if provided will override any AI guesses
(see "understanding the AI guessing flow" here:
https://nla.zapier.com/docs/using-the-api#ai-guessing)
"""
session = self._get_session()
try:
response = session.get(self.zapier_nla_api_base + "exposed/")
response.raise_for_status()
except requests.HTTPError as http_err:
if response.status_code == 401:
if self.zapier_nla_oauth_access_token:
raise requests.HTTPError(
f"An unauthorized response occurred. Check that your "
f"access token is correct and doesn't need to be "
f"refreshed. Err: {http_err}",
response=response,
)
raise requests.HTTPError(
f"An unauthorized response occurred. Check that your api "
f"key is correct. Err: {http_err}",
response=response,
)
raise http_err
return response.json()["results"]
def run(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Dict:
"""Executes an action that is identified by action_id, must be exposed
(enabled) by the current user (associated with the set api_key). Change
your exposed actions here: https://nla.zapier.com/demo/start/
The return JSON is guaranteed to be less than ~500 words (350
tokens) making it safe to inject into the prompt of another LLM
call.
"""
session = self._get_session()
request = self._create_action_request(action_id, instructions, params)
response = session.send(session.prepare_request(request))
response.raise_for_status()
return response.json()["result"]
async def arun(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Dict:
"""Executes an action that is identified by action_id, must be exposed
(enabled) by the current user (associated with the set api_key). Change
your exposed actions here: https://nla.zapier.com/demo/start/
The return JSON is guaranteed to be less than ~500 words (350
tokens) making it safe to inject into the prompt of another LLM
call.
"""
response = await self._arequest(
"POST",
self._create_action_url(action_id),
json=self._create_action_payload(instructions, params),
)
return response["result"]
def preview(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Dict:
"""Same as run, but instead of actually executing the action, will
instead return a preview of params that have been guessed by the AI in
case you need to explicitly review before executing."""
session = self._get_session()
params = params if params else {}
params.update({"preview_only": True})
request = self._create_action_request(action_id, instructions, params, True)
response = session.send(session.prepare_request(request))
response.raise_for_status()
return response.json()["input_params"]
async def apreview(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Dict:
"""Same as run, but instead of actually executing the action, will
instead return a preview of params that have been guessed by the AI in
case you need to explicitly review before executing."""
response = await self._arequest(
"POST",
self._create_action_url(action_id),
json=self._create_action_payload(instructions, params, preview_only=True),
)
return response["result"]
def run_as_str(self, *args, **kwargs) -> str: # type: ignore[no-untyped-def]
"""Same as run, but returns a stringified version of the JSON for
insertting back into an LLM."""
data = self.run(*args, **kwargs)
return json.dumps(data)
async def arun_as_str(self, *args, **kwargs) -> str: # type: ignore[no-untyped-def]
"""Same as run, but returns a stringified version of the JSON for
insertting back into an LLM."""
data = await self.arun(*args, **kwargs)
return json.dumps(data)
def preview_as_str(self, *args, **kwargs) -> str: # type: ignore[no-untyped-def]
"""Same as preview, but returns a stringified version of the JSON for
insertting back into an LLM."""
data = self.preview(*args, **kwargs)
return json.dumps(data)
async def apreview_as_str( # type: ignore[no-untyped-def]
self, *args, **kwargs
) -> str:
"""Same as preview, but returns a stringified version of the JSON for
insertting back into an LLM."""
data = await self.apreview(*args, **kwargs)
return json.dumps(data)
def list_as_str(self) -> str: # type: ignore[no-untyped-def]
"""Same as list, but returns a stringified version of the JSON for
insertting back into an LLM."""
actions = self.list()
return json.dumps(actions)
async def alist_as_str(self) -> str: # type: ignore[no-untyped-def]
"""Same as list, but returns a stringified version of the JSON for
insertting back into an LLM."""
actions = await self.alist()
return json.dumps(actions)
| 0 | 2,670 | [{"tag": "EMAIL", "value": "nla@zapier.com", "start": 501, "end": 515}] | true | 1 | """Util that can interact with Zapier NLA.
Full docs here: https://nla.zapier.com/start/
Note: this wrapper currently only implemented the `api_key` auth method for testing
and server-side production use cases (using the developer's connected accounts on
Zapier.com)
For use-cases where LangChain + Zapier NLA is powering a user-facing application, and
LangChain needs access to the end-user's connected accounts on Zapier.com, you'll need
to use oauth. Review the full docs above and reach out to anpch@example.com for
developer support.
"""
import json
from typing import Any, Dict, List, Optional
import aiohttp
import requests
from requests import Request, Session
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
class ZapierNLAWrapper(BaseModel):
"""Wrapper for Zapier NLA.
Full docs here: https://nla.zapier.com/start/
This wrapper supports both API Key and OAuth Credential auth methods. API Key
is the fastest way to get started using this wrapper.
Call this wrapper with either `zapier_nla_api_key` or
`zapier_nla_oauth_access_token` arguments, or set the `ZAPIER_NLA_API_KEY`
environment variable. If both arguments are set, the Access Token will take
precedence.
For use-cases where LangChain + Zapier NLA is powering a user-facing application,
and LangChain needs access to the end-user's connected accounts on Zapier.com,
you'll need to use OAuth. Review the full docs above to learn how to create
your own provider and generate credentials.
"""
zapier_nla_api_key: str
zapier_nla_oauth_access_token: str
zapier_nla_api_base: str = "https://nla.zapier.com/api/v1/"
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _format_headers(self) -> Dict[str, str]:
"""Format headers for requests."""
headers = {
"Accept": "application/json",
"Content-Type": "application/json",
}
if self.zapier_nla_oauth_access_token:
headers.update(
{"Authorization": f"Bearer {self.zapier_nla_oauth_access_token}"}
)
else:
headers.update({"X-API-Key": self.zapier_nla_api_key})
return headers
def _get_session(self) -> Session:
session = requests.Session()
session.headers.update(self._format_headers())
return session
async def _arequest(self, method: str, url: str, **kwargs: Any) -> Dict[str, Any]:
"""Make an async request."""
async with aiohttp.ClientSession(headers=self._format_headers()) as session:
async with session.request(method, url, **kwargs) as response:
response.raise_for_status()
return await response.json()
def _create_action_payload( # type: ignore[no-untyped-def]
self, instructions: str, params: Optional[Dict] = None, preview_only=False
) -> Dict:
"""Create a payload for an action."""
data = params if params else {}
data.update(
{
"instructions": instructions,
}
)
if preview_only:
data.update({"preview_only": True})
return data
def _create_action_url(self, action_id: str) -> str:
"""Create a url for an action."""
return self.zapier_nla_api_base + f"exposed/{action_id}/execute/"
def _create_action_request( # type: ignore[no-untyped-def]
self,
action_id: str,
instructions: str,
params: Optional[Dict] = None,
preview_only=False,
) -> Request:
data = self._create_action_payload(instructions, params, preview_only)
return Request(
"POST",
self._create_action_url(action_id),
json=data,
)
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
zapier_nla_api_key_default = None
# If there is a oauth_access_key passed in the values
# we don't need a nla_api_key it can be blank
if "zapier_nla_oauth_access_token" in values:
zapier_nla_api_key_default = ""
else:
values["zapier_nla_oauth_access_token"] = ""
# we require at least one API Key
zapier_nla_api_key = get_from_dict_or_env(
values,
"zapier_nla_api_key",
"ZAPIER_NLA_API_KEY",
zapier_nla_api_key_default,
)
values["zapier_nla_api_key"] = zapier_nla_api_key
return values
async def alist(self) -> List[Dict]:
"""Returns a list of all exposed (enabled) actions associated with
current user (associated with the set api_key). Change your exposed
actions here: https://nla.zapier.com/demo/start/
The return list can be empty if no actions exposed. Else will contain
a list of action objects:
[{
"id": str,
"description": str,
"params": Dict[str, str]
}]
`params` will always contain an `instructions` key, the only required
param. All others optional and if provided will override any AI guesses
(see "understanding the AI guessing flow" here:
https://nla.zapier.com/api/v1/docs)
"""
response = await self._arequest("GET", self.zapier_nla_api_base + "exposed/")
return response["results"]
def list(self) -> List[Dict]:
"""Returns a list of all exposed (enabled) actions associated with
current user (associated with the set api_key). Change your exposed
actions here: https://nla.zapier.com/demo/start/
The return list can be empty if no actions exposed. Else will contain
a list of action objects:
[{
"id": str,
"description": str,
"params": Dict[str, str]
}]
`params` will always contain an `instructions` key, the only required
param. All others optional and if provided will override any AI guesses
(see "understanding the AI guessing flow" here:
https://nla.zapier.com/docs/using-the-api#ai-guessing)
"""
session = self._get_session()
try:
response = session.get(self.zapier_nla_api_base + "exposed/")
response.raise_for_status()
except requests.HTTPError as http_err:
if response.status_code == 401:
if self.zapier_nla_oauth_access_token:
raise requests.HTTPError(
f"An unauthorized response occurred. Check that your "
f"access token is correct and doesn't need to be "
f"refreshed. Err: {http_err}",
response=response,
)
raise requests.HTTPError(
f"An unauthorized response occurred. Check that your api "
f"key is correct. Err: {http_err}",
response=response,
)
raise http_err
return response.json()["results"]
def run(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Dict:
"""Executes an action that is identified by action_id, must be exposed
(enabled) by the current user (associated with the set api_key). Change
your exposed actions here: https://nla.zapier.com/demo/start/
The return JSON is guaranteed to be less than ~500 words (350
tokens) making it safe to inject into the prompt of another LLM
call.
"""
session = self._get_session()
request = self._create_action_request(action_id, instructions, params)
response = session.send(session.prepare_request(request))
response.raise_for_status()
return response.json()["result"]
async def arun(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Dict:
"""Executes an action that is identified by action_id, must be exposed
(enabled) by the current user (associated with the set api_key). Change
your exposed actions here: https://nla.zapier.com/demo/start/
The return JSON is guaranteed to be less than ~500 words (350
tokens) making it safe to inject into the prompt of another LLM
call.
"""
response = await self._arequest(
"POST",
self._create_action_url(action_id),
json=self._create_action_payload(instructions, params),
)
return response["result"]
def preview(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Dict:
"""Same as run, but instead of actually executing the action, will
instead return a preview of params that have been guessed by the AI in
case you need to explicitly review before executing."""
session = self._get_session()
params = params if params else {}
params.update({"preview_only": True})
request = self._create_action_request(action_id, instructions, params, True)
response = session.send(session.prepare_request(request))
response.raise_for_status()
return response.json()["input_params"]
async def apreview(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Dict:
"""Same as run, but instead of actually executing the action, will
instead return a preview of params that have been guessed by the AI in
case you need to explicitly review before executing."""
response = await self._arequest(
"POST",
self._create_action_url(action_id),
json=self._create_action_payload(instructions, params, preview_only=True),
)
return response["result"]
def run_as_str(self, *args, **kwargs) -> str: # type: ignore[no-untyped-def]
"""Same as run, but returns a stringified version of the JSON for
insertting back into an LLM."""
data = self.run(*args, **kwargs)
return json.dumps(data)
async def arun_as_str(self, *args, **kwargs) -> str: # type: ignore[no-untyped-def]
"""Same as run, but returns a stringified version of the JSON for
insertting back into an LLM."""
data = await self.arun(*args, **kwargs)
return json.dumps(data)
def preview_as_str(self, *args, **kwargs) -> str: # type: ignore[no-untyped-def]
"""Same as preview, but returns a stringified version of the JSON for
insertting back into an LLM."""
data = self.preview(*args, **kwargs)
return json.dumps(data)
async def apreview_as_str( # type: ignore[no-untyped-def]
self, *args, **kwargs
) -> str:
"""Same as preview, but returns a stringified version of the JSON for
insertting back into an LLM."""
data = await self.apreview(*args, **kwargs)
return json.dumps(data)
def list_as_str(self) -> str: # type: ignore[no-untyped-def]
"""Same as list, but returns a stringified version of the JSON for
insertting back into an LLM."""
actions = self.list()
return json.dumps(actions)
async def alist_as_str(self) -> str: # type: ignore[no-untyped-def]
"""Same as list, but returns a stringified version of the JSON for
insertting back into an LLM."""
actions = await self.alist()
return json.dumps(actions)
| true | """Util that can interact with Zapier NLA.
Full docs here: https://nla.zapier.com/start/
Note: this wrapper currently only implemented the `api_key` auth method for testing
and server-side production use cases (using the developer's connected accounts on
Zapier.com)
For use-cases where LangChain + Zapier NLA is powering a user-facing application, and
LangChain needs access to the end-user's connected accounts on Zapier.com, you'll need
to use oauth. Review the full docs above and reach out to PI:EMAIL:anpch@example.comEND_PI for
developer support.
"""
import json
from typing import Any, Dict, List, Optional
import aiohttp
import requests
from requests import Request, Session
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
class ZapierNLAWrapper(BaseModel):
"""Wrapper for Zapier NLA.
Full docs here: https://nla.zapier.com/start/
This wrapper supports both API Key and OAuth Credential auth methods. API Key
is the fastest way to get started using this wrapper.
Call this wrapper with either `zapier_nla_api_key` or
`zapier_nla_oauth_access_token` arguments, or set the `ZAPIER_NLA_API_KEY`
environment variable. If both arguments are set, the Access Token will take
precedence.
For use-cases where LangChain + Zapier NLA is powering a user-facing application,
and LangChain needs access to the end-user's connected accounts on Zapier.com,
you'll need to use OAuth. Review the full docs above to learn how to create
your own provider and generate credentials.
"""
zapier_nla_api_key: str
zapier_nla_oauth_access_token: str
zapier_nla_api_base: str = "https://nla.zapier.com/api/v1/"
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _format_headers(self) -> Dict[str, str]:
"""Format headers for requests."""
headers = {
"Accept": "application/json",
"Content-Type": "application/json",
}
if self.zapier_nla_oauth_access_token:
headers.update(
{"Authorization": f"Bearer {self.zapier_nla_oauth_access_token}"}
)
else:
headers.update({"X-API-Key": self.zapier_nla_api_key})
return headers
def _get_session(self) -> Session:
session = requests.Session()
session.headers.update(self._format_headers())
return session
async def _arequest(self, method: str, url: str, **kwargs: Any) -> Dict[str, Any]:
"""Make an async request."""
async with aiohttp.ClientSession(headers=self._format_headers()) as session:
async with session.request(method, url, **kwargs) as response:
response.raise_for_status()
return await response.json()
def _create_action_payload( # type: ignore[no-untyped-def]
self, instructions: str, params: Optional[Dict] = None, preview_only=False
) -> Dict:
"""Create a payload for an action."""
data = params if params else {}
data.update(
{
"instructions": instructions,
}
)
if preview_only:
data.update({"preview_only": True})
return data
def _create_action_url(self, action_id: str) -> str:
"""Create a url for an action."""
return self.zapier_nla_api_base + f"exposed/{action_id}/execute/"
def _create_action_request( # type: ignore[no-untyped-def]
self,
action_id: str,
instructions: str,
params: Optional[Dict] = None,
preview_only=False,
) -> Request:
data = self._create_action_payload(instructions, params, preview_only)
return Request(
"POST",
self._create_action_url(action_id),
json=data,
)
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
zapier_nla_api_key_default = None
# If there is a oauth_access_key passed in the values
# we don't need a nla_api_key it can be blank
if "zapier_nla_oauth_access_token" in values:
zapier_nla_api_key_default = ""
else:
values["zapier_nla_oauth_access_token"] = ""
# we require at least one API Key
zapier_nla_api_key = get_from_dict_or_env(
values,
"zapier_nla_api_key",
"ZAPIER_NLA_API_KEY",
zapier_nla_api_key_default,
)
values["zapier_nla_api_key"] = zapier_nla_api_key
return values
async def alist(self) -> List[Dict]:
"""Returns a list of all exposed (enabled) actions associated with
current user (associated with the set api_key). Change your exposed
actions here: https://nla.zapier.com/demo/start/
The return list can be empty if no actions exposed. Else will contain
a list of action objects:
[{
"id": str,
"description": str,
"params": Dict[str, str]
}]
`params` will always contain an `instructions` key, the only required
param. All others optional and if provided will override any AI guesses
(see "understanding the AI guessing flow" here:
https://nla.zapier.com/api/v1/docs)
"""
response = await self._arequest("GET", self.zapier_nla_api_base + "exposed/")
return response["results"]
def list(self) -> List[Dict]:
"""Returns a list of all exposed (enabled) actions associated with
current user (associated with the set api_key). Change your exposed
actions here: https://nla.zapier.com/demo/start/
The return list can be empty if no actions exposed. Else will contain
a list of action objects:
[{
"id": str,
"description": str,
"params": Dict[str, str]
}]
`params` will always contain an `instructions` key, the only required
param. All others optional and if provided will override any AI guesses
(see "understanding the AI guessing flow" here:
https://nla.zapier.com/docs/using-the-api#ai-guessing)
"""
session = self._get_session()
try:
response = session.get(self.zapier_nla_api_base + "exposed/")
response.raise_for_status()
except requests.HTTPError as http_err:
if response.status_code == 401:
if self.zapier_nla_oauth_access_token:
raise requests.HTTPError(
f"An unauthorized response occurred. Check that your "
f"access token is correct and doesn't need to be "
f"refreshed. Err: {http_err}",
response=response,
)
raise requests.HTTPError(
f"An unauthorized response occurred. Check that your api "
f"key is correct. Err: {http_err}",
response=response,
)
raise http_err
return response.json()["results"]
def run(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Dict:
"""Executes an action that is identified by action_id, must be exposed
(enabled) by the current user (associated with the set api_key). Change
your exposed actions here: https://nla.zapier.com/demo/start/
The return JSON is guaranteed to be less than ~500 words (350
tokens) making it safe to inject into the prompt of another LLM
call.
"""
session = self._get_session()
request = self._create_action_request(action_id, instructions, params)
response = session.send(session.prepare_request(request))
response.raise_for_status()
return response.json()["result"]
async def arun(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Dict:
"""Executes an action that is identified by action_id, must be exposed
(enabled) by the current user (associated with the set api_key). Change
your exposed actions here: https://nla.zapier.com/demo/start/
The return JSON is guaranteed to be less than ~500 words (350
tokens) making it safe to inject into the prompt of another LLM
call.
"""
response = await self._arequest(
"POST",
self._create_action_url(action_id),
json=self._create_action_payload(instructions, params),
)
return response["result"]
def preview(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Dict:
"""Same as run, but instead of actually executing the action, will
instead return a preview of params that have been guessed by the AI in
case you need to explicitly review before executing."""
session = self._get_session()
params = params if params else {}
params.update({"preview_only": True})
request = self._create_action_request(action_id, instructions, params, True)
response = session.send(session.prepare_request(request))
response.raise_for_status()
return response.json()["input_params"]
async def apreview(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Dict:
"""Same as run, but instead of actually executing the action, will
instead return a preview of params that have been guessed by the AI in
case you need to explicitly review before executing."""
response = await self._arequest(
"POST",
self._create_action_url(action_id),
json=self._create_action_payload(instructions, params, preview_only=True),
)
return response["result"]
def run_as_str(self, *args, **kwargs) -> str: # type: ignore[no-untyped-def]
"""Same as run, but returns a stringified version of the JSON for
insertting back into an LLM."""
data = self.run(*args, **kwargs)
return json.dumps(data)
async def arun_as_str(self, *args, **kwargs) -> str: # type: ignore[no-untyped-def]
"""Same as run, but returns a stringified version of the JSON for
insertting back into an LLM."""
data = await self.arun(*args, **kwargs)
return json.dumps(data)
def preview_as_str(self, *args, **kwargs) -> str: # type: ignore[no-untyped-def]
"""Same as preview, but returns a stringified version of the JSON for
insertting back into an LLM."""
data = self.preview(*args, **kwargs)
return json.dumps(data)
async def apreview_as_str( # type: ignore[no-untyped-def]
self, *args, **kwargs
) -> str:
"""Same as preview, but returns a stringified version of the JSON for
insertting back into an LLM."""
data = await self.apreview(*args, **kwargs)
return json.dumps(data)
def list_as_str(self) -> str: # type: ignore[no-untyped-def]
"""Same as list, but returns a stringified version of the JSON for
insertting back into an LLM."""
actions = self.list()
return json.dumps(actions)
async def alist_as_str(self) -> str: # type: ignore[no-untyped-def]
"""Same as list, but returns a stringified version of the JSON for
insertting back into an LLM."""
actions = await self.alist()
return json.dumps(actions)
|
hf_public_repos/langchain-ai/langchain/docs/docs/guides/privacy | hf_public_repos/langchain-ai/langchain/docs/docs/guides/privacy/presidio_data_anonymization/reversible.ipynb | # Install necessary packages
# ! pip install langchain langchain-experimental openai presidio-analyzer presidio-anonymizer spacy Faker
# ! python -m spacy download en_core_web_lgfrom langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(
analyzed_fields=["PERSON", "PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD"],
# Faker seed is used here to make sure the same fake data is generated for the test purposes
# In production, it is recommended to remove the faker_seed parameter (it will default to None)
faker_seed=42,
)
anonymizer.anonymize(
"My name is Slim Shady, call me at 313-666-7440 or email me at real.slim.shady@gmail.com. "
"By the way, my card number is: 4916 0387 9536 0861"
)# We know this data, as we set the faker_seed parameter
fake_name = "Maria Lynch"
fake_phone = "7344131647"
fake_email = "jamesmichael@example.com"
fake_credit_card = "4838637940262"
anonymized_text = f"""{fake_name} recently lost his wallet.
Inside is some cash and his credit card with the number {fake_credit_card}.
If you would find it, please call at {fake_phone} or write an email here: {fake_email}.
{fake_name} would be very grateful!"""
print(anonymized_text)print(anonymizer.deanonymize(anonymized_text))text = f"""Slim Shady recently lost his wallet.
Inside is some cash and his credit card with the number 4916 0387 9536 0861.
If you would find it, please call at 313-666-7440 or write an email here: real.slim.shady@gmail.com."""from langchain.prompts.prompt import PromptTemplate
from langchain.chat_models import ChatOpenAI
anonymizer = PresidioReversibleAnonymizer()
template = """Rewrite this text into an official, short email:
{anonymized_text}"""
prompt = PromptTemplate.from_template(template)
llm = ChatOpenAI(temperature=0)
chain = {"anonymized_text": anonymizer.anonymize} | prompt | llm
response = chain.invoke(text)
print(response.content)chain = chain | (lambda ai_message: anonymizer.deanonymize(ai_message.content))
response = chain.invoke(text)
print(response)from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(
analyzed_fields=["PERSON", "PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD"],
# Faker seed is used here to make sure the same fake data is generated for the test purposes
# In production, it is recommended to remove the faker_seed parameter (it will default to None)
faker_seed=42,
)
anonymizer.anonymize(
"My name is Slim Shady, call me at 313-666-7440 or email me at real.slim.shady@gmail.com. "
"By the way, my card number is: 4916 0387 9536 0861"
)
anonymizer.deanonymizer_mappingprint(
anonymizer.anonymize(
"Do you have his VISA card number? Yep, it's 4001 9192 5753 7193. I'm John Doe by the way."
)
)
anonymizer.deanonymizer_mappingprint(
anonymizer.anonymize(
"My VISA card number is 4001 9192 5753 7193 and my name is John Doe."
)
)
anonymizer.deanonymizer_mapping# We can save the deanonymizer mapping as a JSON or YAML file
anonymizer.save_deanonymizer_mapping("deanonymizer_mapping.json")
# anonymizer.save_deanonymizer_mapping("deanonymizer_mapping.yaml")anonymizer = PresidioReversibleAnonymizer()
anonymizer.deanonymizer_mappinganonymizer.load_deanonymizer_mapping("deanonymizer_mapping.json")
anonymizer.deanonymizer_mappingfrom langchain_experimental.data_anonymizer.deanonymizer_matching_strategies import (
case_insensitive_matching_strategy,
)
# Original name: Maria Lynch
print(anonymizer.deanonymize("maria lynch"))
print(
anonymizer.deanonymize(
"maria lynch", deanonymizer_matching_strategy=case_insensitive_matching_strategy
)
)from langchain_experimental.data_anonymizer.deanonymizer_matching_strategies import (
fuzzy_matching_strategy,
)
# Original name: Maria Lynch
# Original phone number: 7344131647 (without dashes)
print(anonymizer.deanonymize("Call Maria K. Lynch at 734-413-1647"))
print(
anonymizer.deanonymize(
"Call Maria K. Lynch at 734-413-1647",
deanonymizer_matching_strategy=fuzzy_matching_strategy,
)
)from langchain_experimental.data_anonymizer.deanonymizer_matching_strategies import (
combined_exact_fuzzy_matching_strategy,
)
# Changed some values for fuzzy match showcase:
# - "Maria Lynch" -> "Maria K. Lynch"
# - "7344131647" -> "734-413-1647"
# - "213186379402654" -> "2131 8637 9402 654"
print(
anonymizer.deanonymize(
(
"Are you Maria F. Lynch? I found your card with number 4838 6379 40262.\n"
"Is this your phone number: 734-413-1647?\n"
"Is this your email address: wdavis@example.net"
),
deanonymizer_matching_strategy=combined_exact_fuzzy_matching_strategy,
)
) | 0 | 4,002 | [{"tag": "EMAIL", "value": "real.slim.shady@gmail.com", "start": 690, "end": 715}, {"tag": "EMAIL", "value": "jamesmichael@example.com", "start": 899, "end": 923}, {"tag": "EMAIL", "value": "real.slim.shady@gmail.com", "start": 2589, "end": 2614}, {"tag": "EMAIL", "value": "wdavis@example.net", "start": 4689, "end": 4707}] | true | 4 | # Install necessary packages
# ! pip install langchain langchain-experimental openai presidio-analyzer presidio-anonymizer spacy Faker
# ! python -m spacy download en_core_web_lgfrom langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(
analyzed_fields=["PERSON", "PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD"],
# Faker seed is used here to make sure the same fake data is generated for the test purposes
# In production, it is recommended to remove the faker_seed parameter (it will default to None)
faker_seed=42,
)
anonymizer.anonymize(
"My name is Slim Shady, call me at 313-666-7440 or email me at dycjh@example.com. "
"By the way, my card number is: 4916 0387 9536 0861"
)# We know this data, as we set the faker_seed parameter
fake_name = "Maria Lynch"
fake_phone = "7344131647"
fake_email = "efpyi@example.com"
fake_credit_card = "4838637940262"
anonymized_text = f"""{fake_name} recently lost his wallet.
Inside is some cash and his credit card with the number {fake_credit_card}.
If you would find it, please call at {fake_phone} or write an email here: {fake_email}.
{fake_name} would be very grateful!"""
print(anonymized_text)print(anonymizer.deanonymize(anonymized_text))text = f"""Slim Shady recently lost his wallet.
Inside is some cash and his credit card with the number 4916 0387 9536 0861.
If you would find it, please call at 313-666-7440 or write an email here: real.slim.shady@gmail.com."""from langchain.prompts.prompt import PromptTemplate
from langchain.chat_models import ChatOpenAI
anonymizer = PresidioReversibleAnonymizer()
template = """Rewrite this text into an official, short email:
{anonymized_text}"""
prompt = PromptTemplate.from_template(template)
llm = ChatOpenAI(temperature=0)
chain = {"anonymized_text": anonymizer.anonymize} | prompt | llm
response = chain.invoke(text)
print(response.content)chain = chain | (lambda ai_message: anonymizer.deanonymize(ai_message.content))
response = chain.invoke(text)
print(response)from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(
analyzed_fields=["PERSON", "PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD"],
# Faker seed is used here to make sure the same fake data is generated for the test purposes
# In production, it is recommended to remove the faker_seed parameter (it will default to None)
faker_seed=42,
)
anonymizer.anonymize(
"My name is Slim Shady, call me at 313-666-7440 or email me at dycjh@example.com. "
"By the way, my card number is: 4916 0387 9536 0861"
)
anonymizer.deanonymizer_mappingprint(
anonymizer.anonymize(
"Do you have his VISA card number? Yep, it's 4001 9192 5753 7193. I'm John Doe by the way."
)
)
anonymizer.deanonymizer_mappingprint(
anonymizer.anonymize(
"My VISA card number is 4001 9192 5753 7193 and my name is John Doe."
)
)
anonymizer.deanonymizer_mapping# We can save the deanonymizer mapping as a JSON or YAML file
anonymizer.save_deanonymizer_mapping("deanonymizer_mapping.json")
# anonymizer.save_deanonymizer_mapping("deanonymizer_mapping.yaml")anonymizer = PresidioReversibleAnonymizer()
anonymizer.deanonymizer_mappinganonymizer.load_deanonymizer_mapping("deanonymizer_mapping.json")
anonymizer.deanonymizer_mappingfrom langchain_experimental.data_anonymizer.deanonymizer_matching_strategies import (
case_insensitive_matching_strategy,
)
# Original name: Maria Lynch
print(anonymizer.deanonymize("maria lynch"))
print(
anonymizer.deanonymize(
"maria lynch", deanonymizer_matching_strategy=case_insensitive_matching_strategy
)
)from langchain_experimental.data_anonymizer.deanonymizer_matching_strategies import (
fuzzy_matching_strategy,
)
# Original name: Maria Lynch
# Original phone number: 7344131647 (without dashes)
print(anonymizer.deanonymize("Call Maria K. Lynch at 734-413-1647"))
print(
anonymizer.deanonymize(
"Call Maria K. Lynch at 734-413-1647",
deanonymizer_matching_strategy=fuzzy_matching_strategy,
)
)from langchain_experimental.data_anonymizer.deanonymizer_matching_strategies import (
combined_exact_fuzzy_matching_strategy,
)
# Changed some values for fuzzy match showcase:
# - "Maria Lynch" -> "Maria K. Lynch"
# - "7344131647" -> "734-413-1647"
# - "213186379402654" -> "2131 8637 9402 654"
print(
anonymizer.deanonymize(
(
"Are you Maria F. Lynch? I found your card with number 4838 6379 40262.\n"
"Is this your phone number: 734-413-1647?\n"
"Is this your email address: kenaa@example.com"
),
deanonymizer_matching_strategy=combined_exact_fuzzy_matching_strategy,
)
) | true | # Install necessary packages
# ! pip install langchain langchain-experimental openai presidio-analyzer presidio-anonymizer spacy Faker
# ! python -m spacy download en_core_web_lgfrom langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(
analyzed_fields=["PERSON", "PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD"],
# Faker seed is used here to make sure the same fake data is generated for the test purposes
# In production, it is recommended to remove the faker_seed parameter (it will default to None)
faker_seed=42,
)
anonymizer.anonymize(
"My name is Slim Shady, call me at 313-666-7440 or email me at PI:EMAIL:dycjh@example.comEND_PI. "
"By the way, my card number is: 4916 0387 9536 0861"
)# We know this data, as we set the faker_seed parameter
fake_name = "Maria Lynch"
fake_phone = "7344131647"
fake_email = "PI:EMAIL:efpyi@example.comEND_PI"
fake_credit_card = "4838637940262"
anonymized_text = f"""{fake_name} recently lost his wallet.
Inside is some cash and his credit card with the number {fake_credit_card}.
If you would find it, please call at {fake_phone} or write an email here: {fake_email}.
{fake_name} would be very grateful!"""
print(anonymized_text)print(anonymizer.deanonymize(anonymized_text))text = f"""Slim Shady recently lost his wallet.
Inside is some cash and his credit card with the number 4916 0387 9536 0861.
If you would find it, please call at 313-666-7440 or write an email here: real.slim.shady@gmail.com."""from langchain.prompts.prompt import PromptTemplate
from langchain.chat_models import ChatOpenAI
anonymizer = PresidioReversibleAnonymizer()
template = """Rewrite this text into an official, short email:
{anonymized_text}"""
prompt = PromptTemplate.from_template(template)
llm = ChatOpenAI(temperature=0)
chain = {"anonymized_text": anonymizer.anonymize} | prompt | llm
response = chain.invoke(text)
print(response.content)chain = chain | (lambda ai_message: anonymizer.deanonymize(ai_message.content))
response = chain.invoke(text)
print(response)from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(
analyzed_fields=["PERSON", "PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD"],
# Faker seed is used here to make sure the same fake data is generated for the test purposes
# In production, it is recommended to remove the faker_seed parameter (it will default to None)
faker_seed=42,
)
anonymizer.anonymize(
"My name is Slim Shady, call me at 313-666-7440 or email me at PI:EMAIL:dycjh@example.comEND_PI. "
"By the way, my card number is: 4916 0387 9536 0861"
)
anonymizer.deanonymizer_mappingprint(
anonymizer.anonymize(
"Do you have his VISA card number? Yep, it's 4001 9192 5753 7193. I'm John Doe by the way."
)
)
anonymizer.deanonymizer_mappingprint(
anonymizer.anonymize(
"My VISA card number is 4001 9192 5753 7193 and my name is John Doe."
)
)
anonymizer.deanonymizer_mapping# We can save the deanonymizer mapping as a JSON or YAML file
anonymizer.save_deanonymizer_mapping("deanonymizer_mapping.json")
# anonymizer.save_deanonymizer_mapping("deanonymizer_mapping.yaml")anonymizer = PresidioReversibleAnonymizer()
anonymizer.deanonymizer_mappinganonymizer.load_deanonymizer_mapping("deanonymizer_mapping.json")
anonymizer.deanonymizer_mappingfrom langchain_experimental.data_anonymizer.deanonymizer_matching_strategies import (
case_insensitive_matching_strategy,
)
# Original name: Maria Lynch
print(anonymizer.deanonymize("maria lynch"))
print(
anonymizer.deanonymize(
"maria lynch", deanonymizer_matching_strategy=case_insensitive_matching_strategy
)
)from langchain_experimental.data_anonymizer.deanonymizer_matching_strategies import (
fuzzy_matching_strategy,
)
# Original name: Maria Lynch
# Original phone number: 7344131647 (without dashes)
print(anonymizer.deanonymize("Call Maria K. Lynch at 734-413-1647"))
print(
anonymizer.deanonymize(
"Call Maria K. Lynch at 734-413-1647",
deanonymizer_matching_strategy=fuzzy_matching_strategy,
)
)from langchain_experimental.data_anonymizer.deanonymizer_matching_strategies import (
combined_exact_fuzzy_matching_strategy,
)
# Changed some values for fuzzy match showcase:
# - "Maria Lynch" -> "Maria K. Lynch"
# - "7344131647" -> "734-413-1647"
# - "213186379402654" -> "2131 8637 9402 654"
print(
anonymizer.deanonymize(
(
"Are you Maria F. Lynch? I found your card with number 4838 6379 40262.\n"
"Is this your phone number: 734-413-1647?\n"
"Is this your email address: PI:EMAIL:kenaa@example.comEND_PI"
),
deanonymizer_matching_strategy=combined_exact_fuzzy_matching_strategy,
)
) |
hf_public_repos/zilliztech/GPTCache/tests | hf_public_repos/zilliztech/GPTCache/tests/unit_tests/test_session.py | import unittest
from unittest.mock import patch
from openai.error import AuthenticationError
from gptcache import cache
from gptcache.adapter import openai
from gptcache.manager import manager_factory
from gptcache.session import Session
from gptcache.processor.pre import get_prompt
from gptcache.embedding import Onnx
from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation
from gptcache.utils.response import get_text_from_openai_answer
def check_hit(cur_session_id, cache_session_ids, cache_questions, cache_answer):
if cache_questions and "what" in cache_questions[0]:
return True
return False
class TestSession(unittest.TestCase):
"""Test Session"""
question = "what is your name?"
expect_answer = "gptcache"
session_id = "test_map"
def test_with(self):
data_manager = manager_factory("map", data_dir="./test_session")
cache.init(data_manager=data_manager, pre_embedding_func=get_prompt)
session0 = Session(self.session_id, check_hit_func=check_hit)
self.assertEqual(session0.name, self.session_id)
with patch("openai.Completion.create") as mock_create:
mock_create.return_value = {
"choices": [{"text": self.expect_answer, "finish_reason": None, "index": 0}],
"created": 1677825464,
"id": "cmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "text-davinci-003",
"object": "text_completion",
}
with Session() as session:
response = openai.Completion.create(model="text-davinci-003", prompt=self.question, session=session)
answer_text = get_text_from_openai_answer(response)
self.assertEqual(answer_text, self.expect_answer)
self.assertEqual(len(data_manager.list_sessions()), 0)
def test_map(self):
data_manager = manager_factory("map", data_dir="./test_session")
cache.init(data_manager=data_manager, pre_embedding_func=get_prompt)
session0 = Session(self.session_id, check_hit_func=check_hit)
with patch("openai.Completion.create") as mock_create:
mock_create.return_value = {
"choices": [{"text": self.expect_answer, "finish_reason": None, "index": 0}],
"created": 1677825464,
"id": "cmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "text-davinci-003",
"object": "text_completion",
}
response = openai.Completion.create(model="text-davinci-003", prompt=self.question, session=session0)
answer_text = get_text_from_openai_answer(response)
self.assertEqual(answer_text, self.expect_answer)
response = openai.Completion.create(model="text-davinci-003", prompt=self.question, session=session0)
answer_text = get_text_from_openai_answer(response)
self.assertEqual(answer_text, self.expect_answer)
session1 = Session()
response = openai.Completion.create(model="text-davinci-003", prompt=self.question, session=session1)
answer_text = get_text_from_openai_answer(response)
self.assertEqual(answer_text, self.expect_answer)
with self.assertRaises(AuthenticationError):
openai.Completion.create(model="text-davinci-003", prompt=self.question, session=session1)
self.assertEqual(len(data_manager.list_sessions()), 2)
session0.drop()
session1.drop()
self.assertEqual(len(data_manager.list_sessions()), 0)
def test_ssd(self):
onnx = Onnx()
data_manager = manager_factory("sqlite,faiss", './test_session', vector_params={"dimension": onnx.dimension})
cache.init(
pre_embedding_func=get_prompt,
embedding_func=onnx.to_embeddings,
data_manager=data_manager,
similarity_evaluation=SearchDistanceEvaluation(),
)
session0 = Session(self.session_id, check_hit_func=check_hit)
with patch("openai.Completion.create") as mock_create:
mock_create.return_value = {
"choices": [{"text": self.expect_answer, "finish_reason": None, "index": 0}],
"created": 1677825464,
"id": "cmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "text-davinci-003",
"object": "text_completion",
}
response = openai.Completion.create(model="text-davinci-003", prompt=self.question, session=session0)
answer_text = get_text_from_openai_answer(response)
self.assertEqual(answer_text, self.expect_answer)
response = openai.Completion.create(model="text-davinci-003", prompt=self.question, session=session0)
answer_text = get_text_from_openai_answer(response)
self.assertEqual(answer_text, self.expect_answer)
session1 = Session()
response = openai.Completion.create(model="text-davinci-003", prompt=self.question, session=session1)
answer_text = get_text_from_openai_answer(response)
self.assertEqual(answer_text, self.expect_answer)
with self.assertRaises(AuthenticationError):
openai.Completion.create(model="text-davinci-003", prompt=self.question, session=session1)
self.assertEqual(len(data_manager.list_sessions()), 2)
session0.drop()
session1.drop()
self.assertEqual(len(data_manager.list_sessions()), 0)
| 0 | 817 | [{"tag": "KEY", "value": "cmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 1361, "end": 1395}, {"tag": "KEY", "value": "cmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 2362, "end": 2396}, {"tag": "KEY", "value": "cmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 4280, "end": 4314}] | true | 3 | import unittest
from unittest.mock import patch
from openai.error import AuthenticationError
from gptcache import cache
from gptcache.adapter import openai
from gptcache.manager import manager_factory
from gptcache.session import Session
from gptcache.processor.pre import get_prompt
from gptcache.embedding import Onnx
from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation
from gptcache.utils.response import get_text_from_openai_answer
def check_hit(cur_session_id, cache_session_ids, cache_questions, cache_answer):
if cache_questions and "what" in cache_questions[0]:
return True
return False
class TestSession(unittest.TestCase):
"""Test Session"""
question = "what is your name?"
expect_answer = "gptcache"
session_id = "test_map"
def test_with(self):
data_manager = manager_factory("map", data_dir="./test_session")
cache.init(data_manager=data_manager, pre_embedding_func=get_prompt)
session0 = Session(self.session_id, check_hit_func=check_hit)
self.assertEqual(session0.name, self.session_id)
with patch("openai.Completion.create") as mock_create:
mock_create.return_value = {
"choices": [{"text": self.expect_answer, "finish_reason": None, "index": 0}],
"created": 1677825464,
"id": "caf86f4uutaoxfysmf7anj01xl6sv3ps",
"model": "text-davinci-003",
"object": "text_completion",
}
with Session() as session:
response = openai.Completion.create(model="text-davinci-003", prompt=self.question, session=session)
answer_text = get_text_from_openai_answer(response)
self.assertEqual(answer_text, self.expect_answer)
self.assertEqual(len(data_manager.list_sessions()), 0)
def test_map(self):
data_manager = manager_factory("map", data_dir="./test_session")
cache.init(data_manager=data_manager, pre_embedding_func=get_prompt)
session0 = Session(self.session_id, check_hit_func=check_hit)
with patch("openai.Completion.create") as mock_create:
mock_create.return_value = {
"choices": [{"text": self.expect_answer, "finish_reason": None, "index": 0}],
"created": 1677825464,
"id": "caf86f4uutaoxfysmf7anj01xl6sv3ps",
"model": "text-davinci-003",
"object": "text_completion",
}
response = openai.Completion.create(model="text-davinci-003", prompt=self.question, session=session0)
answer_text = get_text_from_openai_answer(response)
self.assertEqual(answer_text, self.expect_answer)
response = openai.Completion.create(model="text-davinci-003", prompt=self.question, session=session0)
answer_text = get_text_from_openai_answer(response)
self.assertEqual(answer_text, self.expect_answer)
session1 = Session()
response = openai.Completion.create(model="text-davinci-003", prompt=self.question, session=session1)
answer_text = get_text_from_openai_answer(response)
self.assertEqual(answer_text, self.expect_answer)
with self.assertRaises(AuthenticationError):
openai.Completion.create(model="text-davinci-003", prompt=self.question, session=session1)
self.assertEqual(len(data_manager.list_sessions()), 2)
session0.drop()
session1.drop()
self.assertEqual(len(data_manager.list_sessions()), 0)
def test_ssd(self):
onnx = Onnx()
data_manager = manager_factory("sqlite,faiss", './test_session', vector_params={"dimension": onnx.dimension})
cache.init(
pre_embedding_func=get_prompt,
embedding_func=onnx.to_embeddings,
data_manager=data_manager,
similarity_evaluation=SearchDistanceEvaluation(),
)
session0 = Session(self.session_id, check_hit_func=check_hit)
with patch("openai.Completion.create") as mock_create:
mock_create.return_value = {
"choices": [{"text": self.expect_answer, "finish_reason": None, "index": 0}],
"created": 1677825464,
"id": "caf86f4uutaoxfysmf7anj01xl6sv3ps",
"model": "text-davinci-003",
"object": "text_completion",
}
response = openai.Completion.create(model="text-davinci-003", prompt=self.question, session=session0)
answer_text = get_text_from_openai_answer(response)
self.assertEqual(answer_text, self.expect_answer)
response = openai.Completion.create(model="text-davinci-003", prompt=self.question, session=session0)
answer_text = get_text_from_openai_answer(response)
self.assertEqual(answer_text, self.expect_answer)
session1 = Session()
response = openai.Completion.create(model="text-davinci-003", prompt=self.question, session=session1)
answer_text = get_text_from_openai_answer(response)
self.assertEqual(answer_text, self.expect_answer)
with self.assertRaises(AuthenticationError):
openai.Completion.create(model="text-davinci-003", prompt=self.question, session=session1)
self.assertEqual(len(data_manager.list_sessions()), 2)
session0.drop()
session1.drop()
self.assertEqual(len(data_manager.list_sessions()), 0)
| true | import unittest
from unittest.mock import patch
from openai.error import AuthenticationError
from gptcache import cache
from gptcache.adapter import openai
from gptcache.manager import manager_factory
from gptcache.session import Session
from gptcache.processor.pre import get_prompt
from gptcache.embedding import Onnx
from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation
from gptcache.utils.response import get_text_from_openai_answer
def check_hit(cur_session_id, cache_session_ids, cache_questions, cache_answer):
if cache_questions and "what" in cache_questions[0]:
return True
return False
class TestSession(unittest.TestCase):
"""Test Session"""
question = "what is your name?"
expect_answer = "gptcache"
session_id = "test_map"
def test_with(self):
data_manager = manager_factory("map", data_dir="./test_session")
cache.init(data_manager=data_manager, pre_embedding_func=get_prompt)
session0 = Session(self.session_id, check_hit_func=check_hit)
self.assertEqual(session0.name, self.session_id)
with patch("openai.Completion.create") as mock_create:
mock_create.return_value = {
"choices": [{"text": self.expect_answer, "finish_reason": None, "index": 0}],
"created": 1677825464,
"id": "PI:KEY:caf86f4uutaoxfysmf7anj01xl6sv3psEND_PI",
"model": "text-davinci-003",
"object": "text_completion",
}
with Session() as session:
response = openai.Completion.create(model="text-davinci-003", prompt=self.question, session=session)
answer_text = get_text_from_openai_answer(response)
self.assertEqual(answer_text, self.expect_answer)
self.assertEqual(len(data_manager.list_sessions()), 0)
def test_map(self):
data_manager = manager_factory("map", data_dir="./test_session")
cache.init(data_manager=data_manager, pre_embedding_func=get_prompt)
session0 = Session(self.session_id, check_hit_func=check_hit)
with patch("openai.Completion.create") as mock_create:
mock_create.return_value = {
"choices": [{"text": self.expect_answer, "finish_reason": None, "index": 0}],
"created": 1677825464,
"id": "PI:KEY:caf86f4uutaoxfysmf7anj01xl6sv3psEND_PI",
"model": "text-davinci-003",
"object": "text_completion",
}
response = openai.Completion.create(model="text-davinci-003", prompt=self.question, session=session0)
answer_text = get_text_from_openai_answer(response)
self.assertEqual(answer_text, self.expect_answer)
response = openai.Completion.create(model="text-davinci-003", prompt=self.question, session=session0)
answer_text = get_text_from_openai_answer(response)
self.assertEqual(answer_text, self.expect_answer)
session1 = Session()
response = openai.Completion.create(model="text-davinci-003", prompt=self.question, session=session1)
answer_text = get_text_from_openai_answer(response)
self.assertEqual(answer_text, self.expect_answer)
with self.assertRaises(AuthenticationError):
openai.Completion.create(model="text-davinci-003", prompt=self.question, session=session1)
self.assertEqual(len(data_manager.list_sessions()), 2)
session0.drop()
session1.drop()
self.assertEqual(len(data_manager.list_sessions()), 0)
def test_ssd(self):
onnx = Onnx()
data_manager = manager_factory("sqlite,faiss", './test_session', vector_params={"dimension": onnx.dimension})
cache.init(
pre_embedding_func=get_prompt,
embedding_func=onnx.to_embeddings,
data_manager=data_manager,
similarity_evaluation=SearchDistanceEvaluation(),
)
session0 = Session(self.session_id, check_hit_func=check_hit)
with patch("openai.Completion.create") as mock_create:
mock_create.return_value = {
"choices": [{"text": self.expect_answer, "finish_reason": None, "index": 0}],
"created": 1677825464,
"id": "PI:KEY:caf86f4uutaoxfysmf7anj01xl6sv3psEND_PI",
"model": "text-davinci-003",
"object": "text_completion",
}
response = openai.Completion.create(model="text-davinci-003", prompt=self.question, session=session0)
answer_text = get_text_from_openai_answer(response)
self.assertEqual(answer_text, self.expect_answer)
response = openai.Completion.create(model="text-davinci-003", prompt=self.question, session=session0)
answer_text = get_text_from_openai_answer(response)
self.assertEqual(answer_text, self.expect_answer)
session1 = Session()
response = openai.Completion.create(model="text-davinci-003", prompt=self.question, session=session1)
answer_text = get_text_from_openai_answer(response)
self.assertEqual(answer_text, self.expect_answer)
with self.assertRaises(AuthenticationError):
openai.Completion.create(model="text-davinci-003", prompt=self.question, session=session1)
self.assertEqual(len(data_manager.list_sessions()), 2)
session0.drop()
session1.drop()
self.assertEqual(len(data_manager.list_sessions()), 0)
|
hf_public_repos/zilliztech/GPTCache/tests/unit_tests | hf_public_repos/zilliztech/GPTCache/tests/unit_tests/utils/test_response.py | from gptcache.utils.response import (
get_message_from_openai_answer,
get_stream_message_from_openai_answer,
)
def test_get_message_from_openai_answer():
message = get_message_from_openai_answer(
{
"choices": [
{
"finish_reason": "stop",
"index": 0,
"message": {"content": "hello", "role": "assistant"},
}
],
"created": 1677825456,
"id": "chatcmpl-6ptKqrhgRoVchm58Bby0UvJzq2ZuQ",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion",
"usage": {
"completion_tokens": 301,
"prompt_tokens": 36,
"total_tokens": 337,
},
}
)
assert message == "hello"
def test_get_stream_message_from_openai_answer():
message = get_stream_message_from_openai_answer(
{
"choices": [
{"delta": {"role": "assistant"}, "finish_reason": None, "index": 0}
],
"created": 1677825464,
"id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
)
assert message == ""
message = get_stream_message_from_openai_answer(
{
"choices": [{"delta": {"content": "2"}, "finish_reason": None, "index": 0}],
"created": 1677825464,
"id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
)
assert message == "2"
| 0 | 862 | [{"tag": "KEY", "value": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 1119, "end": 1157}, {"tag": "KEY", "value": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 1498, "end": 1536}, {"tag": "KEY", "value": "chatcmpl-6ptKqrhgRoVchm58Bby0UvJzq2ZuQ", "start": 501, "end": 539}] | true | 3 | from gptcache.utils.response import (
get_message_from_openai_answer,
get_stream_message_from_openai_answer,
)
def test_get_message_from_openai_answer():
message = get_message_from_openai_answer(
{
"choices": [
{
"finish_reason": "stop",
"index": 0,
"message": {"content": "hello", "role": "assistant"},
}
],
"created": 1677825456,
"id": "vqbpgud2ghvjgm1n5hdgjnn5818fzsf2",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion",
"usage": {
"completion_tokens": 301,
"prompt_tokens": 36,
"total_tokens": 337,
},
}
)
assert message == "hello"
def test_get_stream_message_from_openai_answer():
message = get_stream_message_from_openai_answer(
{
"choices": [
{"delta": {"role": "assistant"}, "finish_reason": None, "index": 0}
],
"created": 1677825464,
"id": "9q3vfhm7l33rus21toc8fndupq76itje",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
)
assert message == ""
message = get_stream_message_from_openai_answer(
{
"choices": [{"delta": {"content": "2"}, "finish_reason": None, "index": 0}],
"created": 1677825464,
"id": "9q3vfhm7l33rus21toc8fndupq76itje",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
)
assert message == "2"
| true | from gptcache.utils.response import (
get_message_from_openai_answer,
get_stream_message_from_openai_answer,
)
def test_get_message_from_openai_answer():
message = get_message_from_openai_answer(
{
"choices": [
{
"finish_reason": "stop",
"index": 0,
"message": {"content": "hello", "role": "assistant"},
}
],
"created": 1677825456,
"id": "PI:KEY:vqbpgud2ghvjgm1n5hdgjnn5818fzsf2END_PI",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion",
"usage": {
"completion_tokens": 301,
"prompt_tokens": 36,
"total_tokens": 337,
},
}
)
assert message == "hello"
def test_get_stream_message_from_openai_answer():
message = get_stream_message_from_openai_answer(
{
"choices": [
{"delta": {"role": "assistant"}, "finish_reason": None, "index": 0}
],
"created": 1677825464,
"id": "PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
)
assert message == ""
message = get_stream_message_from_openai_answer(
{
"choices": [{"delta": {"content": "2"}, "finish_reason": None, "index": 0}],
"created": 1677825464,
"id": "PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
)
assert message == "2"
|
hf_public_repos/langchain-ai/langchain/libs/langchain/tests/integration_tests | hf_public_repos/langchain-ai/langchain/libs/langchain/tests/integration_tests/llms/test_opaqueprompts.py | import langchain.utilities.opaqueprompts as op
from langchain.chains.llm import LLMChain
from langchain.llms import OpenAI
from langchain.llms.opaqueprompts import OpaquePrompts
from langchain.memory import ConversationBufferWindowMemory
from langchain.prompts import PromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableParallel
prompt_template = """
As an AI assistant, you will answer questions according to given context.
Sensitive personal information in the question is masked for privacy.
For instance, if the original text says "Giana is good," it will be changed
to "PERSON_998 is good."
Here's how to handle these changes:
* Consider these masked phrases just as placeholders, but still refer to
them in a relevant way when answering.
* It's possible that different masked terms might mean the same thing.
Stick with the given term and don't modify it.
* All masked terms follow the "TYPE_ID" pattern.
* Please don't invent new masked terms. For instance, if you see "PERSON_998,"
don't come up with "PERSON_997" or "PERSON_999" unless they're already in the question.
Conversation History: ```{history}```
Context : ```During our recent meeting on February 23, 2023, at 10:30 AM,
John Doe provided me with his personal details. His email is johndoe@example.com
and his contact number is 650-456-7890. He lives in New York City, USA, and
belongs to the American nationality with Christian beliefs and a leaning towards
the Democratic party. He mentioned that he recently made a transaction using his
credit card 4111 1111 1111 1111 and transferred bitcoins to the wallet address
1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa. While discussing his European travels, he
noted down his IBAN as GB29 NWBK 6016 1331 9268 19. Additionally, he provided
his website as https://johndoeportfolio.com. John also discussed
some of his US-specific details. He said his bank account number is
1234567890123456 and his drivers license is Y12345678. His ITIN is 987-65-4321,
and he recently renewed his passport,
the number for which is 123456789. He emphasized not to share his SSN, which is
669-45-6789. Furthermore, he mentioned that he accesses his work files remotely
through the IP 192.168.1.1 and has a medical license number MED-123456. ```
Question: ```{question}```
"""
def test_opaqueprompts() -> None:
chain = LLMChain(
prompt=PromptTemplate.from_template(prompt_template),
llm=OpaquePrompts(llm=OpenAI()),
memory=ConversationBufferWindowMemory(k=2),
)
output = chain.run(
{
"question": "Write a text message to remind John to do password reset \
for his website through his email to stay secure."
}
)
assert isinstance(output, str)
def test_opaqueprompts_functions() -> None:
prompt = (PromptTemplate.from_template(prompt_template),)
llm = OpenAI()
pg_chain = (
op.sanitize
| RunnableParallel(
secure_context=lambda x: x["secure_context"], # type: ignore
response=(lambda x: x["sanitized_input"]) # type: ignore
| prompt
| llm
| StrOutputParser(),
)
| (lambda x: op.desanitize(x["response"], x["secure_context"]))
)
pg_chain.invoke(
{
"question": "Write a text message to remind John to do password reset\
for his website through his email to stay secure.",
"history": "",
}
)
| 0 | 1,503 | [{"tag": "EMAIL", "value": "johndoe@example.com", "start": 1323, "end": 1342}, {"tag": "IP_ADDRESS", "value": "192.168.1.1", "start": 2242, "end": 2253}] | true | 2 | import langchain.utilities.opaqueprompts as op
from langchain.chains.llm import LLMChain
from langchain.llms import OpenAI
from langchain.llms.opaqueprompts import OpaquePrompts
from langchain.memory import ConversationBufferWindowMemory
from langchain.prompts import PromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableParallel
prompt_template = """
As an AI assistant, you will answer questions according to given context.
Sensitive personal information in the question is masked for privacy.
For instance, if the original text says "Giana is good," it will be changed
to "PERSON_998 is good."
Here's how to handle these changes:
* Consider these masked phrases just as placeholders, but still refer to
them in a relevant way when answering.
* It's possible that different masked terms might mean the same thing.
Stick with the given term and don't modify it.
* All masked terms follow the "TYPE_ID" pattern.
* Please don't invent new masked terms. For instance, if you see "PERSON_998,"
don't come up with "PERSON_997" or "PERSON_999" unless they're already in the question.
Conversation History: ```{history}```
Context : ```During our recent meeting on February 23, 2023, at 10:30 AM,
John Doe provided me with his personal details. His email is dycjh@example.com
and his contact number is 650-456-7890. He lives in New York City, USA, and
belongs to the American nationality with Christian beliefs and a leaning towards
the Democratic party. He mentioned that he recently made a transaction using his
credit card 4111 1111 1111 1111 and transferred bitcoins to the wallet address
1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa. While discussing his European travels, he
noted down his IBAN as GB29 NWBK 6016 1331 9268 19. Additionally, he provided
his website as https://johndoeportfolio.com. John also discussed
some of his US-specific details. He said his bank account number is
1234567890123456 and his drivers license is Y12345678. His ITIN is 987-65-4321,
and he recently renewed his passport,
the number for which is 123456789. He emphasized not to share his SSN, which is
669-45-6789. Furthermore, he mentioned that he accesses his work files remotely
through the IP 192.168.1.1 and has a medical license number MED-123456. ```
Question: ```{question}```
"""
def test_opaqueprompts() -> None:
chain = LLMChain(
prompt=PromptTemplate.from_template(prompt_template),
llm=OpaquePrompts(llm=OpenAI()),
memory=ConversationBufferWindowMemory(k=2),
)
output = chain.run(
{
"question": "Write a text message to remind John to do password reset \
for his website through his email to stay secure."
}
)
assert isinstance(output, str)
def test_opaqueprompts_functions() -> None:
prompt = (PromptTemplate.from_template(prompt_template),)
llm = OpenAI()
pg_chain = (
op.sanitize
| RunnableParallel(
secure_context=lambda x: x["secure_context"], # type: ignore
response=(lambda x: x["sanitized_input"]) # type: ignore
| prompt
| llm
| StrOutputParser(),
)
| (lambda x: op.desanitize(x["response"], x["secure_context"]))
)
pg_chain.invoke(
{
"question": "Write a text message to remind John to do password reset\
for his website through his email to stay secure.",
"history": "",
}
)
| true | import langchain.utilities.opaqueprompts as op
from langchain.chains.llm import LLMChain
from langchain.llms import OpenAI
from langchain.llms.opaqueprompts import OpaquePrompts
from langchain.memory import ConversationBufferWindowMemory
from langchain.prompts import PromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableParallel
prompt_template = """
As an AI assistant, you will answer questions according to given context.
Sensitive personal information in the question is masked for privacy.
For instance, if the original text says "Giana is good," it will be changed
to "PERSON_998 is good."
Here's how to handle these changes:
* Consider these masked phrases just as placeholders, but still refer to
them in a relevant way when answering.
* It's possible that different masked terms might mean the same thing.
Stick with the given term and don't modify it.
* All masked terms follow the "TYPE_ID" pattern.
* Please don't invent new masked terms. For instance, if you see "PERSON_998,"
don't come up with "PERSON_997" or "PERSON_999" unless they're already in the question.
Conversation History: ```{history}```
Context : ```During our recent meeting on February 23, 2023, at 10:30 AM,
John Doe provided me with his personal details. His email is PI:EMAIL:dycjh@example.comEND_PI
and his contact number is 650-456-7890. He lives in New York City, USA, and
belongs to the American nationality with Christian beliefs and a leaning towards
the Democratic party. He mentioned that he recently made a transaction using his
credit card 4111 1111 1111 1111 and transferred bitcoins to the wallet address
1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa. While discussing his European travels, he
noted down his IBAN as GB29 NWBK 6016 1331 9268 19. Additionally, he provided
his website as https://johndoeportfolio.com. John also discussed
some of his US-specific details. He said his bank account number is
1234567890123456 and his drivers license is Y12345678. His ITIN is 987-65-4321,
and he recently renewed his passport,
the number for which is 123456789. He emphasized not to share his SSN, which is
669-45-6789. Furthermore, he mentioned that he accesses his work files remotely
through the IP 192.168.1.1 and has a medical license number MED-123456. ```
Question: ```{question}```
"""
def test_opaqueprompts() -> None:
chain = LLMChain(
prompt=PromptTemplate.from_template(prompt_template),
llm=OpaquePrompts(llm=OpenAI()),
memory=ConversationBufferWindowMemory(k=2),
)
output = chain.run(
{
"question": "Write a text message to remind John to do password reset \
for his website through his email to stay secure."
}
)
assert isinstance(output, str)
def test_opaqueprompts_functions() -> None:
prompt = (PromptTemplate.from_template(prompt_template),)
llm = OpenAI()
pg_chain = (
op.sanitize
| RunnableParallel(
secure_context=lambda x: x["secure_context"], # type: ignore
response=(lambda x: x["sanitized_input"]) # type: ignore
| prompt
| llm
| StrOutputParser(),
)
| (lambda x: op.desanitize(x["response"], x["secure_context"]))
)
pg_chain.invoke(
{
"question": "Write a text message to remind John to do password reset\
for his website through his email to stay secure.",
"history": "",
}
)
|
hf_public_repos/zilliztech/GPTCache/tests/unit_tests | hf_public_repos/zilliztech/GPTCache/tests/unit_tests/adapter/test_llama_cpp.py | import os
import sys
import unittest
from unittest.mock import patch, MagicMock
from tempfile import TemporaryDirectory
from gptcache import Cache
from gptcache.processor.pre import get_prompt
from gptcache.manager.factory import manager_factory
from gptcache.embedding import Onnx
question = "test_llama_cpp"
expect_answer = "hello world"
onnx = Onnx()
class MockLlama:
def __init__(self, *args, **kwargs):
pass
def create_completion(*args, **kwargs):
data = {
"choices": [
{
"finish_reason": "stop",
"index": 0,
"text": expect_answer,
}
],
"created": 1677825456,
"id": "chatcmpl-6ptKqrhgRoVchm58Bby0UvJzq2ZuQ",
"model": "llam_cpp",
"object": "chat.completion",
"usage": {
"completion_tokens": 301,
"prompt_tokens": 36,
"total_tokens": 337
}
}
if not kwargs.get('stream', False):
return data
else:
return iter([data])
mock_module = MagicMock()
sys.modules['llama_cpp'] = mock_module
class TestLlama(unittest.TestCase):
def test_llama_cpp(self):
mock_module.Llama = MockLlama
with TemporaryDirectory(dir="./") as root:
m = manager_factory('sqlite,faiss,local', data_dir=root, vector_params={"dimension": onnx.dimension})
llm_cache = Cache()
llm_cache.init(
pre_embedding_func=get_prompt,
data_manager=m,
embedding_func=onnx.to_embeddings
)
with patch('gptcache.utils.import_llama_cpp_python'):
from gptcache.adapter.llama_cpp import Llama
llm = Llama('model.bin')
answer = llm(prompt=question, cache_obj=llm_cache)
assert expect_answer == answer['choices'][0]['text']
answer2 = llm(prompt=question, cache_obj=llm_cache)
assert answer2['gptcache'] is True
assert expect_answer == answer2['choices'][0]['text']
llm(prompt=question, cache_obj=llm_cache, stream=True, stop=['\n'])
answer = llm(prompt=question, cache_obj=llm_cache, stream=True)
for item in answer:
self.assertEqual(item['choices'][0]['text'], expect_answer)
def test_llama_cpp_stream(self):
with TemporaryDirectory(dir="./") as root:
m = manager_factory('sqlite,faiss,local', data_dir=root, vector_params={"dimension": onnx.dimension})
llm_cache = Cache()
llm_cache.init(
pre_embedding_func=get_prompt,
data_manager=m,
embedding_func=onnx.to_embeddings
)
with patch('gptcache.utils.import_llama_cpp_python'):
from gptcache.adapter.llama_cpp import Llama
llm = Llama('model.bin')
answer = llm(prompt=question, cache_obj=llm_cache, stream=True)
for item in answer:
assert expect_answer == item['choices'][0]['text']
answer2 = llm(prompt=question, cache_obj=llm_cache)
assert answer2['gptcache'] is True
assert expect_answer == answer2['choices'][0]['text']
answer = llm(prompt=question, cache_obj=llm_cache, stream=True)
for item in answer:
self.assertEqual(item['choices'][0]['text'], expect_answer)
| 0 | 853 | [{"tag": "KEY", "value": "chatcmpl-6ptKqrhgRoVchm58Bby0UvJzq2ZuQ", "start": 740, "end": 778}] | true | 1 | import os
import sys
import unittest
from unittest.mock import patch, MagicMock
from tempfile import TemporaryDirectory
from gptcache import Cache
from gptcache.processor.pre import get_prompt
from gptcache.manager.factory import manager_factory
from gptcache.embedding import Onnx
question = "test_llama_cpp"
expect_answer = "hello world"
onnx = Onnx()
class MockLlama:
def __init__(self, *args, **kwargs):
pass
def create_completion(*args, **kwargs):
data = {
"choices": [
{
"finish_reason": "stop",
"index": 0,
"text": expect_answer,
}
],
"created": 1677825456,
"id": "949d1u22cbffbrarjh182eig55721odj",
"model": "llam_cpp",
"object": "chat.completion",
"usage": {
"completion_tokens": 301,
"prompt_tokens": 36,
"total_tokens": 337
}
}
if not kwargs.get('stream', False):
return data
else:
return iter([data])
mock_module = MagicMock()
sys.modules['llama_cpp'] = mock_module
class TestLlama(unittest.TestCase):
def test_llama_cpp(self):
mock_module.Llama = MockLlama
with TemporaryDirectory(dir="./") as root:
m = manager_factory('sqlite,faiss,local', data_dir=root, vector_params={"dimension": onnx.dimension})
llm_cache = Cache()
llm_cache.init(
pre_embedding_func=get_prompt,
data_manager=m,
embedding_func=onnx.to_embeddings
)
with patch('gptcache.utils.import_llama_cpp_python'):
from gptcache.adapter.llama_cpp import Llama
llm = Llama('model.bin')
answer = llm(prompt=question, cache_obj=llm_cache)
assert expect_answer == answer['choices'][0]['text']
answer2 = llm(prompt=question, cache_obj=llm_cache)
assert answer2['gptcache'] is True
assert expect_answer == answer2['choices'][0]['text']
llm(prompt=question, cache_obj=llm_cache, stream=True, stop=['\n'])
answer = llm(prompt=question, cache_obj=llm_cache, stream=True)
for item in answer:
self.assertEqual(item['choices'][0]['text'], expect_answer)
def test_llama_cpp_stream(self):
with TemporaryDirectory(dir="./") as root:
m = manager_factory('sqlite,faiss,local', data_dir=root, vector_params={"dimension": onnx.dimension})
llm_cache = Cache()
llm_cache.init(
pre_embedding_func=get_prompt,
data_manager=m,
embedding_func=onnx.to_embeddings
)
with patch('gptcache.utils.import_llama_cpp_python'):
from gptcache.adapter.llama_cpp import Llama
llm = Llama('model.bin')
answer = llm(prompt=question, cache_obj=llm_cache, stream=True)
for item in answer:
assert expect_answer == item['choices'][0]['text']
answer2 = llm(prompt=question, cache_obj=llm_cache)
assert answer2['gptcache'] is True
assert expect_answer == answer2['choices'][0]['text']
answer = llm(prompt=question, cache_obj=llm_cache, stream=True)
for item in answer:
self.assertEqual(item['choices'][0]['text'], expect_answer)
| true | import os
import sys
import unittest
from unittest.mock import patch, MagicMock
from tempfile import TemporaryDirectory
from gptcache import Cache
from gptcache.processor.pre import get_prompt
from gptcache.manager.factory import manager_factory
from gptcache.embedding import Onnx
question = "test_llama_cpp"
expect_answer = "hello world"
onnx = Onnx()
class MockLlama:
def __init__(self, *args, **kwargs):
pass
def create_completion(*args, **kwargs):
data = {
"choices": [
{
"finish_reason": "stop",
"index": 0,
"text": expect_answer,
}
],
"created": 1677825456,
"id": "PI:KEY:949d1u22cbffbrarjh182eig55721odjEND_PI",
"model": "llam_cpp",
"object": "chat.completion",
"usage": {
"completion_tokens": 301,
"prompt_tokens": 36,
"total_tokens": 337
}
}
if not kwargs.get('stream', False):
return data
else:
return iter([data])
mock_module = MagicMock()
sys.modules['llama_cpp'] = mock_module
class TestLlama(unittest.TestCase):
def test_llama_cpp(self):
mock_module.Llama = MockLlama
with TemporaryDirectory(dir="./") as root:
m = manager_factory('sqlite,faiss,local', data_dir=root, vector_params={"dimension": onnx.dimension})
llm_cache = Cache()
llm_cache.init(
pre_embedding_func=get_prompt,
data_manager=m,
embedding_func=onnx.to_embeddings
)
with patch('gptcache.utils.import_llama_cpp_python'):
from gptcache.adapter.llama_cpp import Llama
llm = Llama('model.bin')
answer = llm(prompt=question, cache_obj=llm_cache)
assert expect_answer == answer['choices'][0]['text']
answer2 = llm(prompt=question, cache_obj=llm_cache)
assert answer2['gptcache'] is True
assert expect_answer == answer2['choices'][0]['text']
llm(prompt=question, cache_obj=llm_cache, stream=True, stop=['\n'])
answer = llm(prompt=question, cache_obj=llm_cache, stream=True)
for item in answer:
self.assertEqual(item['choices'][0]['text'], expect_answer)
def test_llama_cpp_stream(self):
with TemporaryDirectory(dir="./") as root:
m = manager_factory('sqlite,faiss,local', data_dir=root, vector_params={"dimension": onnx.dimension})
llm_cache = Cache()
llm_cache.init(
pre_embedding_func=get_prompt,
data_manager=m,
embedding_func=onnx.to_embeddings
)
with patch('gptcache.utils.import_llama_cpp_python'):
from gptcache.adapter.llama_cpp import Llama
llm = Llama('model.bin')
answer = llm(prompt=question, cache_obj=llm_cache, stream=True)
for item in answer:
assert expect_answer == item['choices'][0]['text']
answer2 = llm(prompt=question, cache_obj=llm_cache)
assert answer2['gptcache'] is True
assert expect_answer == answer2['choices'][0]['text']
answer = llm(prompt=question, cache_obj=llm_cache, stream=True)
for item in answer:
self.assertEqual(item['choices'][0]['text'], expect_answer)
|
hf_public_repos/melih-unsal/DemoGPT | hf_public_repos/melih-unsal/DemoGPT/demogpt/app.py | import logging
import os
import signal
import sys
import streamlit as st
import streamlit.components.v1 as components
current_file_path = os.path.abspath(__file__)
current_directory = os.path.dirname(current_file_path)
parent_directory = os.path.dirname(current_directory)
grandparent_directory = os.path.dirname(parent_directory)
sys.path.append(grandparent_directory)
from model import DemoGPT
from utils import runStreamlit
try:
from dotenv import load_dotenv
load_dotenv()
except Exception as e:
logging.error("dotenv import error but no needed")
def generate_response(txt):
"""
Generate response using the LangChainCoder.
Args:
txt (str): The input text.
Yields:
dict: A dictionary containing response information.
"""
for data in agent(txt):
yield data
def initCode():
if "code" not in st.session_state:
st.session_state["code"] = ""
st.session_state.edit_mode = False
# Page title
title = "🧩 DemoGPT"
st.set_page_config(page_title=title)
st.title(title)
initCode()
# Text input
openai_api_key = st.sidebar.text_input(
"OpenAI API Key",
placeholder="sk-...",
value=os.getenv("OPENAI_API_KEY", ""),
type="password",
)
openai_api_base = st.sidebar.text_input(
"Open AI base URL",
placeholder="https://api.openai.com/v1",
)
models = (
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
"gpt-4",
"gpt-4-0314",
"gpt-4-0613",
)
model_name = st.sidebar.selectbox("Model", models)
overview = st.text_area(
"Explain your LLM-based application idea *",
placeholder="Type your application idea here",
height=100,
help="""## Example prompts
* Character Clone: Want an app that converses like Jeff Bezos? Prompt - "A chat-based application that talks like Jeff Bezos."
* Language Mastery: Need help in learning French? Prompt - "An application that translates English sentences to French and provides pronunciation guidance for learners.
* Content Generation: Looking to generate content? Prompt - "A system that can write ready to share Medium article from website. The resulting Medium article should be creative and interesting and written in a markdown format."
""",
)
features = st.text_input(
"List all specific features desired for your app (comma seperated)",
placeholder="Document interpretation, question answering, ...",
help="Please provide a comprehensive list of specific features and functionalities you envision in your application, ensuring each element supports your overall objectives and user needs.(comma seperated)"
)
if overview and features:
demo_idea = f"Overview:{overview}\nFeatures:{features}"
elif overview:
demo_idea = overview
else:
demo_idea = ""
def progressBar(percentage, bar=None):
if bar:
bar.progress(percentage)
else:
return st.progress(percentage)
if "pid" not in st.session_state:
st.session_state["pid"] = -1
if "done" not in st.session_state:
st.session_state["done"] = False
with st.form("a", clear_on_submit=True):
submitted = st.form_submit_button("Submit")
def kill():
if st.session_state["pid"] != -1:
logging.info(f"Terminating the previous applicaton ...")
try:
os.kill(st.session_state["pid"], signal.SIGTERM)
except Exception as e:
pass
st.session_state["pid"] = -1
if submitted:
if not demo_idea:
st.warning("Please enter your demo idea", icon="⚠️")
st.stop()
st.session_state.messages = []
if not openai_api_key:
st.warning("Please enter your OpenAI API Key!", icon="⚠️")
elif demo_idea:
bar = progressBar(0)
st.session_state.container = st.container()
try:
agent = DemoGPT(openai_api_key=openai_api_key, openai_api_base=openai_api_base)
agent.setModel(model_name)
except Exception as e:
st.warning(e)
else:
kill()
code_empty = st.empty()
st.session_state.container = st.container()
for data in generate_response(demo_idea):
done = data.get("done", False)
failed = data.get("failed", False)
message = data.get("message", "")
st.session_state["message"] = message
stage = data.get("stage", "stage")
code = data.get("code", "")
progressBar(data["percentage"], bar)
st.session_state["done"] = done
st.session_state["failed"] = failed
st.session_state["message"] = message
if done or failed:
st.session_state.code = code
break
st.info(message, icon="🧩")
st.session_state.messages.append(message)
elif "messages" in st.session_state:
for message in st.session_state.messages:
st.info(message, icon="🧩")
if st.session_state.done:
st.success(st.session_state.message)
with st.expander("Code", expanded=True):
code_empty = st.empty()
if st.session_state.edit_mode:
new_code = code_empty.text_area("", st.session_state.code, height=500)
if st.button("Save & Rerun"):
st.session_state.code = (
new_code # Save the edited code to session state
)
st.session_state.edit_mode = False # Exit edit mode
code_empty.code(new_code)
kill()
st.session_state["pid"] = runStreamlit(
new_code, openai_api_key, openai_api_base
)
st.experimental_rerun()
else:
code_empty.code(st.session_state.code)
if st.button("Edit"):
st.session_state.edit_mode = True # Enter edit mode
st.experimental_rerun()
example_submitted = False
if submitted:
st.session_state["pid"] = runStreamlit(code, openai_api_key, openai_api_base)
if st.session_state.get("failed", False):
with st.form("fail"):
st.warning(st.session_state["message"])
email = st.text_input("Email", placeholder="example@example.com")
email_submit = st.form_submit_button("Send")
if email_submit:
st.success(
"🌟 Thank you for entrusting us with your vision! We're on it and will ping you the moment your app is ready to launch. Stay tuned for a stellar update soon!"
) | 0 | 571 | [{"tag": "EMAIL", "value": "example@example.com", "start": 6319, "end": 6338}] | true | 1 | import logging
import os
import signal
import sys
import streamlit as st
import streamlit.components.v1 as components
current_file_path = os.path.abspath(__file__)
current_directory = os.path.dirname(current_file_path)
parent_directory = os.path.dirname(current_directory)
grandparent_directory = os.path.dirname(parent_directory)
sys.path.append(grandparent_directory)
from model import DemoGPT
from utils import runStreamlit
try:
from dotenv import load_dotenv
load_dotenv()
except Exception as e:
logging.error("dotenv import error but no needed")
def generate_response(txt):
"""
Generate response using the LangChainCoder.
Args:
txt (str): The input text.
Yields:
dict: A dictionary containing response information.
"""
for data in agent(txt):
yield data
def initCode():
if "code" not in st.session_state:
st.session_state["code"] = ""
st.session_state.edit_mode = False
# Page title
title = "🧩 DemoGPT"
st.set_page_config(page_title=title)
st.title(title)
initCode()
# Text input
openai_api_key = st.sidebar.text_input(
"OpenAI API Key",
placeholder="sk-...",
value=os.getenv("OPENAI_API_KEY", ""),
type="password",
)
openai_api_base = st.sidebar.text_input(
"Open AI base URL",
placeholder="https://api.openai.com/v1",
)
models = (
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
"gpt-4",
"gpt-4-0314",
"gpt-4-0613",
)
model_name = st.sidebar.selectbox("Model", models)
overview = st.text_area(
"Explain your LLM-based application idea *",
placeholder="Type your application idea here",
height=100,
help="""## Example prompts
* Character Clone: Want an app that converses like Jeff Bezos? Prompt - "A chat-based application that talks like Jeff Bezos."
* Language Mastery: Need help in learning French? Prompt - "An application that translates English sentences to French and provides pronunciation guidance for learners.
* Content Generation: Looking to generate content? Prompt - "A system that can write ready to share Medium article from website. The resulting Medium article should be creative and interesting and written in a markdown format."
""",
)
features = st.text_input(
"List all specific features desired for your app (comma seperated)",
placeholder="Document interpretation, question answering, ...",
help="Please provide a comprehensive list of specific features and functionalities you envision in your application, ensuring each element supports your overall objectives and user needs.(comma seperated)"
)
if overview and features:
demo_idea = f"Overview:{overview}\nFeatures:{features}"
elif overview:
demo_idea = overview
else:
demo_idea = ""
def progressBar(percentage, bar=None):
if bar:
bar.progress(percentage)
else:
return st.progress(percentage)
if "pid" not in st.session_state:
st.session_state["pid"] = -1
if "done" not in st.session_state:
st.session_state["done"] = False
with st.form("a", clear_on_submit=True):
submitted = st.form_submit_button("Submit")
def kill():
if st.session_state["pid"] != -1:
logging.info(f"Terminating the previous applicaton ...")
try:
os.kill(st.session_state["pid"], signal.SIGTERM)
except Exception as e:
pass
st.session_state["pid"] = -1
if submitted:
if not demo_idea:
st.warning("Please enter your demo idea", icon="⚠️")
st.stop()
st.session_state.messages = []
if not openai_api_key:
st.warning("Please enter your OpenAI API Key!", icon="⚠️")
elif demo_idea:
bar = progressBar(0)
st.session_state.container = st.container()
try:
agent = DemoGPT(openai_api_key=openai_api_key, openai_api_base=openai_api_base)
agent.setModel(model_name)
except Exception as e:
st.warning(e)
else:
kill()
code_empty = st.empty()
st.session_state.container = st.container()
for data in generate_response(demo_idea):
done = data.get("done", False)
failed = data.get("failed", False)
message = data.get("message", "")
st.session_state["message"] = message
stage = data.get("stage", "stage")
code = data.get("code", "")
progressBar(data["percentage"], bar)
st.session_state["done"] = done
st.session_state["failed"] = failed
st.session_state["message"] = message
if done or failed:
st.session_state.code = code
break
st.info(message, icon="🧩")
st.session_state.messages.append(message)
elif "messages" in st.session_state:
for message in st.session_state.messages:
st.info(message, icon="🧩")
if st.session_state.done:
st.success(st.session_state.message)
with st.expander("Code", expanded=True):
code_empty = st.empty()
if st.session_state.edit_mode:
new_code = code_empty.text_area("", st.session_state.code, height=500)
if st.button("Save & Rerun"):
st.session_state.code = (
new_code # Save the edited code to session state
)
st.session_state.edit_mode = False # Exit edit mode
code_empty.code(new_code)
kill()
st.session_state["pid"] = runStreamlit(
new_code, openai_api_key, openai_api_base
)
st.experimental_rerun()
else:
code_empty.code(st.session_state.code)
if st.button("Edit"):
st.session_state.edit_mode = True # Enter edit mode
st.experimental_rerun()
example_submitted = False
if submitted:
st.session_state["pid"] = runStreamlit(code, openai_api_key, openai_api_base)
if st.session_state.get("failed", False):
with st.form("fail"):
st.warning(st.session_state["message"])
email = st.text_input("Email", placeholder="efpyi@example.com")
email_submit = st.form_submit_button("Send")
if email_submit:
st.success(
"🌟 Thank you for entrusting us with your vision! We're on it and will ping you the moment your app is ready to launch. Stay tuned for a stellar update soon!"
) | true | import logging
import os
import signal
import sys
import streamlit as st
import streamlit.components.v1 as components
current_file_path = os.path.abspath(__file__)
current_directory = os.path.dirname(current_file_path)
parent_directory = os.path.dirname(current_directory)
grandparent_directory = os.path.dirname(parent_directory)
sys.path.append(grandparent_directory)
from model import DemoGPT
from utils import runStreamlit
try:
from dotenv import load_dotenv
load_dotenv()
except Exception as e:
logging.error("dotenv import error but no needed")
def generate_response(txt):
"""
Generate response using the LangChainCoder.
Args:
txt (str): The input text.
Yields:
dict: A dictionary containing response information.
"""
for data in agent(txt):
yield data
def initCode():
if "code" not in st.session_state:
st.session_state["code"] = ""
st.session_state.edit_mode = False
# Page title
title = "🧩 DemoGPT"
st.set_page_config(page_title=title)
st.title(title)
initCode()
# Text input
openai_api_key = st.sidebar.text_input(
"OpenAI API Key",
placeholder="sk-...",
value=os.getenv("OPENAI_API_KEY", ""),
type="password",
)
openai_api_base = st.sidebar.text_input(
"Open AI base URL",
placeholder="https://api.openai.com/v1",
)
models = (
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
"gpt-4",
"gpt-4-0314",
"gpt-4-0613",
)
model_name = st.sidebar.selectbox("Model", models)
overview = st.text_area(
"Explain your LLM-based application idea *",
placeholder="Type your application idea here",
height=100,
help="""## Example prompts
* Character Clone: Want an app that converses like Jeff Bezos? Prompt - "A chat-based application that talks like Jeff Bezos."
* Language Mastery: Need help in learning French? Prompt - "An application that translates English sentences to French and provides pronunciation guidance for learners.
* Content Generation: Looking to generate content? Prompt - "A system that can write ready to share Medium article from website. The resulting Medium article should be creative and interesting and written in a markdown format."
""",
)
features = st.text_input(
"List all specific features desired for your app (comma seperated)",
placeholder="Document interpretation, question answering, ...",
help="Please provide a comprehensive list of specific features and functionalities you envision in your application, ensuring each element supports your overall objectives and user needs.(comma seperated)"
)
if overview and features:
demo_idea = f"Overview:{overview}\nFeatures:{features}"
elif overview:
demo_idea = overview
else:
demo_idea = ""
def progressBar(percentage, bar=None):
if bar:
bar.progress(percentage)
else:
return st.progress(percentage)
if "pid" not in st.session_state:
st.session_state["pid"] = -1
if "done" not in st.session_state:
st.session_state["done"] = False
with st.form("a", clear_on_submit=True):
submitted = st.form_submit_button("Submit")
def kill():
if st.session_state["pid"] != -1:
logging.info(f"Terminating the previous applicaton ...")
try:
os.kill(st.session_state["pid"], signal.SIGTERM)
except Exception as e:
pass
st.session_state["pid"] = -1
if submitted:
if not demo_idea:
st.warning("Please enter your demo idea", icon="⚠️")
st.stop()
st.session_state.messages = []
if not openai_api_key:
st.warning("Please enter your OpenAI API Key!", icon="⚠️")
elif demo_idea:
bar = progressBar(0)
st.session_state.container = st.container()
try:
agent = DemoGPT(openai_api_key=openai_api_key, openai_api_base=openai_api_base)
agent.setModel(model_name)
except Exception as e:
st.warning(e)
else:
kill()
code_empty = st.empty()
st.session_state.container = st.container()
for data in generate_response(demo_idea):
done = data.get("done", False)
failed = data.get("failed", False)
message = data.get("message", "")
st.session_state["message"] = message
stage = data.get("stage", "stage")
code = data.get("code", "")
progressBar(data["percentage"], bar)
st.session_state["done"] = done
st.session_state["failed"] = failed
st.session_state["message"] = message
if done or failed:
st.session_state.code = code
break
st.info(message, icon="🧩")
st.session_state.messages.append(message)
elif "messages" in st.session_state:
for message in st.session_state.messages:
st.info(message, icon="🧩")
if st.session_state.done:
st.success(st.session_state.message)
with st.expander("Code", expanded=True):
code_empty = st.empty()
if st.session_state.edit_mode:
new_code = code_empty.text_area("", st.session_state.code, height=500)
if st.button("Save & Rerun"):
st.session_state.code = (
new_code # Save the edited code to session state
)
st.session_state.edit_mode = False # Exit edit mode
code_empty.code(new_code)
kill()
st.session_state["pid"] = runStreamlit(
new_code, openai_api_key, openai_api_base
)
st.experimental_rerun()
else:
code_empty.code(st.session_state.code)
if st.button("Edit"):
st.session_state.edit_mode = True # Enter edit mode
st.experimental_rerun()
example_submitted = False
if submitted:
st.session_state["pid"] = runStreamlit(code, openai_api_key, openai_api_base)
if st.session_state.get("failed", False):
with st.form("fail"):
st.warning(st.session_state["message"])
email = st.text_input("Email", placeholder="PI:EMAIL:efpyi@example.comEND_PI")
email_submit = st.form_submit_button("Send")
if email_submit:
st.success(
"🌟 Thank you for entrusting us with your vision! We're on it and will ping you the moment your app is ready to launch. Stay tuned for a stellar update soon!"
) |
hf_public_repos/eosphoros-ai/DB-GPT/pilot/base_modules | hf_public_repos/eosphoros-ai/DB-GPT/pilot/base_modules/agent/plugins_util.py | """加载组件"""
import json
import os
import glob
import zipfile
import fnmatch
import requests
import git
import threading
import datetime
import logging
from pathlib import Path
from typing import List
from urllib.parse import urlparse
from zipimport import zipimporter
import requests
from auto_gpt_plugin_template import AutoGPTPluginTemplate
from pilot.configs.config import Config
from pilot.configs.model_config import PLUGINS_DIR
logger = logging.getLogger(__name__)
def inspect_zip_for_modules(zip_path: str, debug: bool = False) -> list[str]:
"""
Loader zip plugin file. Native support Auto_gpt_plugin
Args:
zip_path (str): Path to the zipfile.
debug (bool, optional): Enable debug logging. Defaults to False.
Returns:
list[str]: The list of module names found or empty list if none were found.
"""
result = []
with zipfile.ZipFile(zip_path, "r") as zfile:
for name in zfile.namelist():
if name.endswith("__init__.py") and not name.startswith("__MACOSX"):
logger.debug(f"Found module '{name}' in the zipfile at: {name}")
result.append(name)
if len(result) == 0:
logger.debug(f"Module '__init__.py' not found in the zipfile @ {zip_path}.")
return result
def write_dict_to_json_file(data: dict, file_path: str) -> None:
"""
Write a dictionary to a JSON file.
Args:
data (dict): Dictionary to write.
file_path (str): Path to the file.
"""
with open(file_path, "w") as file:
json.dump(data, file, indent=4)
def create_directory_if_not_exists(directory_path: str) -> bool:
"""
Create a directory if it does not exist.
Args:
directory_path (str): Path to the directory.
Returns:
bool: True if the directory was created, else False.
"""
if not os.path.exists(directory_path):
try:
os.makedirs(directory_path)
logger.debug(f"Created directory: {directory_path}")
return True
except OSError as e:
logger.warn(f"Error creating directory {directory_path}: {e}")
return False
else:
logger.info(f"Directory {directory_path} already exists")
return True
def load_native_plugins(cfg: Config):
if not cfg.plugins_auto_load:
print("not auto load_native_plugins")
return
def load_from_git(cfg: Config):
print("async load_native_plugins")
branch_name = cfg.plugins_git_branch
native_plugin_repo = "DB-GPT-Plugins"
url = "https://github.com/csunny/{repo}/archive/{branch}.zip"
try:
session = requests.Session()
response = session.get(
url.format(repo=native_plugin_repo, branch=branch_name),
headers={"Authorization": "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"},
)
if response.status_code == 200:
plugins_path_path = Path(PLUGINS_DIR)
files = glob.glob(
os.path.join(plugins_path_path, f"{native_plugin_repo}*")
)
for file in files:
os.remove(file)
now = datetime.datetime.now()
time_str = now.strftime("%Y%m%d%H%M%S")
file_name = f"{plugins_path_path}/{native_plugin_repo}-{branch_name}-{time_str}.zip"
print(file_name)
with open(file_name, "wb") as f:
f.write(response.content)
print("save file")
cfg.set_plugins(scan_plugins(cfg.debug_mode))
else:
print("get file faild,response code:", response.status_code)
except Exception as e:
print("load plugin from git exception!" + str(e))
t = threading.Thread(target=load_from_git, args=(cfg,))
t.start()
def __scan_plugin_file(file_path, debug: bool = False) -> List[AutoGPTPluginTemplate]:
logger.info(f"__scan_plugin_file:{file_path},{debug}")
loaded_plugins = []
if moduleList := inspect_zip_for_modules(str(file_path), debug):
for module in moduleList:
plugin = Path(file_path)
module = Path(module)
logger.debug(f"Plugin: {plugin} Module: {module}")
zipped_package = zipimporter(str(plugin))
zipped_module = zipped_package.load_module(str(module.parent))
for key in dir(zipped_module):
if key.startswith("__"):
continue
a_module = getattr(zipped_module, key)
a_keys = dir(a_module)
if (
"_abc_impl" in a_keys
and a_module.__name__ != "AutoGPTPluginTemplate"
# and denylist_allowlist_check(a_module.__name__, cfg)
):
loaded_plugins.append(a_module())
return loaded_plugins
def scan_plugins(
plugins_file_path: str, file_name: str = "", debug: bool = False
) -> List[AutoGPTPluginTemplate]:
"""Scan the plugins directory for plugins and loads them.
Args:
cfg (Config): Config instance including plugins config
debug (bool, optional): Enable debug logging. Defaults to False.
Returns:
List[Tuple[str, Path]]: List of plugins.
"""
loaded_plugins = []
# Generic plugins
plugins_path = Path(plugins_file_path)
if file_name:
plugin_path = Path(plugins_path, file_name)
loaded_plugins = __scan_plugin_file(plugin_path)
else:
for plugin_path in plugins_path.glob("*.zip"):
loaded_plugins.extend(__scan_plugin_file(plugin_path))
if loaded_plugins:
logger.info(f"\nPlugins found: {len(loaded_plugins)}\n" "--------------------")
for plugin in loaded_plugins:
logger.info(f"{plugin._name}: {plugin._version} - {plugin._description}")
return loaded_plugins
def denylist_allowlist_check(plugin_name: str, cfg: Config) -> bool:
"""Check if the plugin is in the allowlist or denylist.
Args:
plugin_name (str): Name of the plugin.
cfg (Config): Config object.
Returns:
True or False
"""
logger.debug(f"Checking if plugin {plugin_name} should be loaded")
if plugin_name in cfg.plugins_denylist:
logger.debug(f"Not loading plugin {plugin_name} as it was in the denylist.")
return False
if plugin_name in cfg.plugins_allowlist:
logger.debug(f"Loading plugin {plugin_name} as it was in the allowlist.")
return True
ack = input(
f"WARNING: Plugin {plugin_name} found. But not in the"
f" allowlist... Load? ({cfg.authorise_key}/{cfg.exit_key}): "
)
return ack.lower() == cfg.authorise_key
def update_from_git(
download_path: str,
github_repo: str = "",
branch_name: str = "main",
authorization: str = None,
):
os.makedirs(download_path, exist_ok=True)
if github_repo:
if github_repo.index("github.com") <= 0:
raise ValueError("Not a correct Github repository address!" + github_repo)
github_repo = github_repo.replace(".git", "")
url = github_repo + "/archive/refs/heads/" + branch_name + ".zip"
plugin_repo_name = github_repo.strip("/").split("/")[-1]
else:
url = (
"https://github.com/eosphoros-ai/DB-GPT-Plugins/archive/refs/heads/main.zip"
)
plugin_repo_name = "DB-GPT-Plugins"
try:
session = requests.Session()
headers = {}
if authorization and len(authorization) > 0:
headers = {"Authorization": authorization}
response = session.get(
url,
headers=headers,
)
if response.status_code == 200:
plugins_path_path = Path(download_path)
files = glob.glob(os.path.join(plugins_path_path, f"{plugin_repo_name}*"))
for file in files:
os.remove(file)
now = datetime.datetime.now()
time_str = now.strftime("%Y%m%d%H%M%S")
file_name = (
f"{plugins_path_path}/{plugin_repo_name}-{branch_name}-{time_str}.zip"
)
print(file_name)
with open(file_name, "wb") as f:
f.write(response.content)
return plugin_repo_name
else:
logger.error("update plugins faild,response code:", response.status_code)
raise ValueError("download plugin faild!" + response.status_code)
except Exception as e:
logger.error("update plugins from git exception!" + str(e))
raise ValueError("download plugin exception!", e)
def __fetch_from_git(local_path, git_url):
logger.info("fetch plugins from git to local path:{}", local_path)
os.makedirs(local_path, exist_ok=True)
repo = git.Repo(local_path)
if repo.is_repo():
repo.remotes.origin.pull()
else:
git.Repo.clone_from(git_url, local_path)
# if repo.head.is_valid():
# clone succ, fetch plugins info
| 0 | 512 | [{"tag": "KEY", "value": "ghp", "start": 2827, "end": 2830}, {"tag": "KEY", "value": "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", "start": 2827, "end": 2867}] | true | 2 | """加载组件"""
import json
import os
import glob
import zipfile
import fnmatch
import requests
import git
import threading
import datetime
import logging
from pathlib import Path
from typing import List
from urllib.parse import urlparse
from zipimport import zipimporter
import requests
from auto_gpt_plugin_template import AutoGPTPluginTemplate
from pilot.configs.config import Config
from pilot.configs.model_config import PLUGINS_DIR
logger = logging.getLogger(__name__)
def inspect_zip_for_modules(zip_path: str, debug: bool = False) -> list[str]:
"""
Loader zip plugin file. Native support Auto_gpt_plugin
Args:
zip_path (str): Path to the zipfile.
debug (bool, optional): Enable debug logging. Defaults to False.
Returns:
list[str]: The list of module names found or empty list if none were found.
"""
result = []
with zipfile.ZipFile(zip_path, "r") as zfile:
for name in zfile.namelist():
if name.endswith("__init__.py") and not name.startswith("__MACOSX"):
logger.debug(f"Found module '{name}' in the zipfile at: {name}")
result.append(name)
if len(result) == 0:
logger.debug(f"Module '__init__.py' not found in the zipfile @ {zip_path}.")
return result
def write_dict_to_json_file(data: dict, file_path: str) -> None:
"""
Write a dictionary to a JSON file.
Args:
data (dict): Dictionary to write.
file_path (str): Path to the file.
"""
with open(file_path, "w") as file:
json.dump(data, file, indent=4)
def create_directory_if_not_exists(directory_path: str) -> bool:
"""
Create a directory if it does not exist.
Args:
directory_path (str): Path to the directory.
Returns:
bool: True if the directory was created, else False.
"""
if not os.path.exists(directory_path):
try:
os.makedirs(directory_path)
logger.debug(f"Created directory: {directory_path}")
return True
except OSError as e:
logger.warn(f"Error creating directory {directory_path}: {e}")
return False
else:
logger.info(f"Directory {directory_path} already exists")
return True
def load_native_plugins(cfg: Config):
if not cfg.plugins_auto_load:
print("not auto load_native_plugins")
return
def load_from_git(cfg: Config):
print("async load_native_plugins")
branch_name = cfg.plugins_git_branch
native_plugin_repo = "DB-GPT-Plugins"
url = "https://github.com/csunny/{repo}/archive/{branch}.zip"
try:
session = requests.Session()
response = session.get(
url.format(repo=native_plugin_repo, branch=branch_name),
headers={"Authorization": "se2xy1bknelxn4y8xzxu3trosptip3q5 kgfhvu9qnh3mr6eel97y6fq2hezzol8z"},
)
if response.status_code == 200:
plugins_path_path = Path(PLUGINS_DIR)
files = glob.glob(
os.path.join(plugins_path_path, f"{native_plugin_repo}*")
)
for file in files:
os.remove(file)
now = datetime.datetime.now()
time_str = now.strftime("%Y%m%d%H%M%S")
file_name = f"{plugins_path_path}/{native_plugin_repo}-{branch_name}-{time_str}.zip"
print(file_name)
with open(file_name, "wb") as f:
f.write(response.content)
print("save file")
cfg.set_plugins(scan_plugins(cfg.debug_mode))
else:
print("get file faild,response code:", response.status_code)
except Exception as e:
print("load plugin from git exception!" + str(e))
t = threading.Thread(target=load_from_git, args=(cfg,))
t.start()
def __scan_plugin_file(file_path, debug: bool = False) -> List[AutoGPTPluginTemplate]:
logger.info(f"__scan_plugin_file:{file_path},{debug}")
loaded_plugins = []
if moduleList := inspect_zip_for_modules(str(file_path), debug):
for module in moduleList:
plugin = Path(file_path)
module = Path(module)
logger.debug(f"Plugin: {plugin} Module: {module}")
zipped_package = zipimporter(str(plugin))
zipped_module = zipped_package.load_module(str(module.parent))
for key in dir(zipped_module):
if key.startswith("__"):
continue
a_module = getattr(zipped_module, key)
a_keys = dir(a_module)
if (
"_abc_impl" in a_keys
and a_module.__name__ != "AutoGPTPluginTemplate"
# and denylist_allowlist_check(a_module.__name__, cfg)
):
loaded_plugins.append(a_module())
return loaded_plugins
def scan_plugins(
plugins_file_path: str, file_name: str = "", debug: bool = False
) -> List[AutoGPTPluginTemplate]:
"""Scan the plugins directory for plugins and loads them.
Args:
cfg (Config): Config instance including plugins config
debug (bool, optional): Enable debug logging. Defaults to False.
Returns:
List[Tuple[str, Path]]: List of plugins.
"""
loaded_plugins = []
# Generic plugins
plugins_path = Path(plugins_file_path)
if file_name:
plugin_path = Path(plugins_path, file_name)
loaded_plugins = __scan_plugin_file(plugin_path)
else:
for plugin_path in plugins_path.glob("*.zip"):
loaded_plugins.extend(__scan_plugin_file(plugin_path))
if loaded_plugins:
logger.info(f"\nPlugins found: {len(loaded_plugins)}\n" "--------------------")
for plugin in loaded_plugins:
logger.info(f"{plugin._name}: {plugin._version} - {plugin._description}")
return loaded_plugins
def denylist_allowlist_check(plugin_name: str, cfg: Config) -> bool:
"""Check if the plugin is in the allowlist or denylist.
Args:
plugin_name (str): Name of the plugin.
cfg (Config): Config object.
Returns:
True or False
"""
logger.debug(f"Checking if plugin {plugin_name} should be loaded")
if plugin_name in cfg.plugins_denylist:
logger.debug(f"Not loading plugin {plugin_name} as it was in the denylist.")
return False
if plugin_name in cfg.plugins_allowlist:
logger.debug(f"Loading plugin {plugin_name} as it was in the allowlist.")
return True
ack = input(
f"WARNING: Plugin {plugin_name} found. But not in the"
f" allowlist... Load? ({cfg.authorise_key}/{cfg.exit_key}): "
)
return ack.lower() == cfg.authorise_key
def update_from_git(
download_path: str,
github_repo: str = "",
branch_name: str = "main",
authorization: str = None,
):
os.makedirs(download_path, exist_ok=True)
if github_repo:
if github_repo.index("github.com") <= 0:
raise ValueError("Not a correct Github repository address!" + github_repo)
github_repo = github_repo.replace(".git", "")
url = github_repo + "/archive/refs/heads/" + branch_name + ".zip"
plugin_repo_name = github_repo.strip("/").split("/")[-1]
else:
url = (
"https://github.com/eosphoros-ai/DB-GPT-Plugins/archive/refs/heads/main.zip"
)
plugin_repo_name = "DB-GPT-Plugins"
try:
session = requests.Session()
headers = {}
if authorization and len(authorization) > 0:
headers = {"Authorization": authorization}
response = session.get(
url,
headers=headers,
)
if response.status_code == 200:
plugins_path_path = Path(download_path)
files = glob.glob(os.path.join(plugins_path_path, f"{plugin_repo_name}*"))
for file in files:
os.remove(file)
now = datetime.datetime.now()
time_str = now.strftime("%Y%m%d%H%M%S")
file_name = (
f"{plugins_path_path}/{plugin_repo_name}-{branch_name}-{time_str}.zip"
)
print(file_name)
with open(file_name, "wb") as f:
f.write(response.content)
return plugin_repo_name
else:
logger.error("update plugins faild,response code:", response.status_code)
raise ValueError("download plugin faild!" + response.status_code)
except Exception as e:
logger.error("update plugins from git exception!" + str(e))
raise ValueError("download plugin exception!", e)
def __fetch_from_git(local_path, git_url):
logger.info("fetch plugins from git to local path:{}", local_path)
os.makedirs(local_path, exist_ok=True)
repo = git.Repo(local_path)
if repo.is_repo():
repo.remotes.origin.pull()
else:
git.Repo.clone_from(git_url, local_path)
# if repo.head.is_valid():
# clone succ, fetch plugins info
| true | """加载组件"""
import json
import os
import glob
import zipfile
import fnmatch
import requests
import git
import threading
import datetime
import logging
from pathlib import Path
from typing import List
from urllib.parse import urlparse
from zipimport import zipimporter
import requests
from auto_gpt_plugin_template import AutoGPTPluginTemplate
from pilot.configs.config import Config
from pilot.configs.model_config import PLUGINS_DIR
logger = logging.getLogger(__name__)
def inspect_zip_for_modules(zip_path: str, debug: bool = False) -> list[str]:
"""
Loader zip plugin file. Native support Auto_gpt_plugin
Args:
zip_path (str): Path to the zipfile.
debug (bool, optional): Enable debug logging. Defaults to False.
Returns:
list[str]: The list of module names found or empty list if none were found.
"""
result = []
with zipfile.ZipFile(zip_path, "r") as zfile:
for name in zfile.namelist():
if name.endswith("__init__.py") and not name.startswith("__MACOSX"):
logger.debug(f"Found module '{name}' in the zipfile at: {name}")
result.append(name)
if len(result) == 0:
logger.debug(f"Module '__init__.py' not found in the zipfile @ {zip_path}.")
return result
def write_dict_to_json_file(data: dict, file_path: str) -> None:
"""
Write a dictionary to a JSON file.
Args:
data (dict): Dictionary to write.
file_path (str): Path to the file.
"""
with open(file_path, "w") as file:
json.dump(data, file, indent=4)
def create_directory_if_not_exists(directory_path: str) -> bool:
"""
Create a directory if it does not exist.
Args:
directory_path (str): Path to the directory.
Returns:
bool: True if the directory was created, else False.
"""
if not os.path.exists(directory_path):
try:
os.makedirs(directory_path)
logger.debug(f"Created directory: {directory_path}")
return True
except OSError as e:
logger.warn(f"Error creating directory {directory_path}: {e}")
return False
else:
logger.info(f"Directory {directory_path} already exists")
return True
def load_native_plugins(cfg: Config):
if not cfg.plugins_auto_load:
print("not auto load_native_plugins")
return
def load_from_git(cfg: Config):
print("async load_native_plugins")
branch_name = cfg.plugins_git_branch
native_plugin_repo = "DB-GPT-Plugins"
url = "https://github.com/csunny/{repo}/archive/{branch}.zip"
try:
session = requests.Session()
response = session.get(
url.format(repo=native_plugin_repo, branch=branch_name),
headers={"Authorization": "PI:KEY:se2xy1bknelxn4y8xzxu3trosptip3q5END_PI PI:KEY:kgfhvu9qnh3mr6eel97y6fq2hezzol8zEND_PI"},
)
if response.status_code == 200:
plugins_path_path = Path(PLUGINS_DIR)
files = glob.glob(
os.path.join(plugins_path_path, f"{native_plugin_repo}*")
)
for file in files:
os.remove(file)
now = datetime.datetime.now()
time_str = now.strftime("%Y%m%d%H%M%S")
file_name = f"{plugins_path_path}/{native_plugin_repo}-{branch_name}-{time_str}.zip"
print(file_name)
with open(file_name, "wb") as f:
f.write(response.content)
print("save file")
cfg.set_plugins(scan_plugins(cfg.debug_mode))
else:
print("get file faild,response code:", response.status_code)
except Exception as e:
print("load plugin from git exception!" + str(e))
t = threading.Thread(target=load_from_git, args=(cfg,))
t.start()
def __scan_plugin_file(file_path, debug: bool = False) -> List[AutoGPTPluginTemplate]:
logger.info(f"__scan_plugin_file:{file_path},{debug}")
loaded_plugins = []
if moduleList := inspect_zip_for_modules(str(file_path), debug):
for module in moduleList:
plugin = Path(file_path)
module = Path(module)
logger.debug(f"Plugin: {plugin} Module: {module}")
zipped_package = zipimporter(str(plugin))
zipped_module = zipped_package.load_module(str(module.parent))
for key in dir(zipped_module):
if key.startswith("__"):
continue
a_module = getattr(zipped_module, key)
a_keys = dir(a_module)
if (
"_abc_impl" in a_keys
and a_module.__name__ != "AutoGPTPluginTemplate"
# and denylist_allowlist_check(a_module.__name__, cfg)
):
loaded_plugins.append(a_module())
return loaded_plugins
def scan_plugins(
plugins_file_path: str, file_name: str = "", debug: bool = False
) -> List[AutoGPTPluginTemplate]:
"""Scan the plugins directory for plugins and loads them.
Args:
cfg (Config): Config instance including plugins config
debug (bool, optional): Enable debug logging. Defaults to False.
Returns:
List[Tuple[str, Path]]: List of plugins.
"""
loaded_plugins = []
# Generic plugins
plugins_path = Path(plugins_file_path)
if file_name:
plugin_path = Path(plugins_path, file_name)
loaded_plugins = __scan_plugin_file(plugin_path)
else:
for plugin_path in plugins_path.glob("*.zip"):
loaded_plugins.extend(__scan_plugin_file(plugin_path))
if loaded_plugins:
logger.info(f"\nPlugins found: {len(loaded_plugins)}\n" "--------------------")
for plugin in loaded_plugins:
logger.info(f"{plugin._name}: {plugin._version} - {plugin._description}")
return loaded_plugins
def denylist_allowlist_check(plugin_name: str, cfg: Config) -> bool:
"""Check if the plugin is in the allowlist or denylist.
Args:
plugin_name (str): Name of the plugin.
cfg (Config): Config object.
Returns:
True or False
"""
logger.debug(f"Checking if plugin {plugin_name} should be loaded")
if plugin_name in cfg.plugins_denylist:
logger.debug(f"Not loading plugin {plugin_name} as it was in the denylist.")
return False
if plugin_name in cfg.plugins_allowlist:
logger.debug(f"Loading plugin {plugin_name} as it was in the allowlist.")
return True
ack = input(
f"WARNING: Plugin {plugin_name} found. But not in the"
f" allowlist... Load? ({cfg.authorise_key}/{cfg.exit_key}): "
)
return ack.lower() == cfg.authorise_key
def update_from_git(
download_path: str,
github_repo: str = "",
branch_name: str = "main",
authorization: str = None,
):
os.makedirs(download_path, exist_ok=True)
if github_repo:
if github_repo.index("github.com") <= 0:
raise ValueError("Not a correct Github repository address!" + github_repo)
github_repo = github_repo.replace(".git", "")
url = github_repo + "/archive/refs/heads/" + branch_name + ".zip"
plugin_repo_name = github_repo.strip("/").split("/")[-1]
else:
url = (
"https://github.com/eosphoros-ai/DB-GPT-Plugins/archive/refs/heads/main.zip"
)
plugin_repo_name = "DB-GPT-Plugins"
try:
session = requests.Session()
headers = {}
if authorization and len(authorization) > 0:
headers = {"Authorization": authorization}
response = session.get(
url,
headers=headers,
)
if response.status_code == 200:
plugins_path_path = Path(download_path)
files = glob.glob(os.path.join(plugins_path_path, f"{plugin_repo_name}*"))
for file in files:
os.remove(file)
now = datetime.datetime.now()
time_str = now.strftime("%Y%m%d%H%M%S")
file_name = (
f"{plugins_path_path}/{plugin_repo_name}-{branch_name}-{time_str}.zip"
)
print(file_name)
with open(file_name, "wb") as f:
f.write(response.content)
return plugin_repo_name
else:
logger.error("update plugins faild,response code:", response.status_code)
raise ValueError("download plugin faild!" + response.status_code)
except Exception as e:
logger.error("update plugins from git exception!" + str(e))
raise ValueError("download plugin exception!", e)
def __fetch_from_git(local_path, git_url):
logger.info("fetch plugins from git to local path:{}", local_path)
os.makedirs(local_path, exist_ok=True)
repo = git.Repo(local_path)
if repo.is_repo():
repo.remotes.origin.pull()
else:
git.Repo.clone_from(git_url, local_path)
# if repo.head.is_valid():
# clone succ, fetch plugins info
|
hf_public_repos/langchain-ai/langchain/libs/langchain/tests/integration_tests | hf_public_repos/langchain-ai/langchain/libs/langchain/tests/integration_tests/document_loaders/test_email.py | from pathlib import Path
from langchain.document_loaders import OutlookMessageLoader, UnstructuredEmailLoader
def test_outlook_message_loader() -> None:
"""Test OutlookMessageLoader."""
file_path = Path(__file__).parent.parent / "examples/hello.msg"
loader = OutlookMessageLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
assert docs[0].metadata["subject"] == "Test for TIF files"
assert docs[0].metadata["sender"] == "Brian Zhou <brizhou@gmail.com>"
assert docs[0].metadata["date"] == "Mon, 18 Nov 2013 16:26:24 +0800"
assert docs[0].page_content == (
"This is a test email to experiment with the MS Outlook MSG "
"Extractor\r\n\r\n\r\n-- \r\n\r\n\r\nKind regards"
"\r\n\r\n\r\n\r\n\r\nBrian Zhou\r\n\r\n"
)
def test_unstructured_email_loader_with_attachments() -> None:
file_path = Path(__file__).parent.parent / "examples/fake-email-attachment.eml"
loader = UnstructuredEmailLoader(
str(file_path), mode="elements", process_attachments=True
)
docs = loader.load()
assert docs[-1].page_content == "Hey this is a fake attachment!"
assert docs[-1].metadata["filename"] == "fake-attachment.txt"
assert docs[-1].metadata["source"].endswith("fake-email-attachment.eml")
| 0 | 1,615 | [{"tag": "EMAIL", "value": "brizhou@gmail.com", "start": 480, "end": 497}] | true | 1 | from pathlib import Path
from langchain.document_loaders import OutlookMessageLoader, UnstructuredEmailLoader
def test_outlook_message_loader() -> None:
"""Test OutlookMessageLoader."""
file_path = Path(__file__).parent.parent / "examples/hello.msg"
loader = OutlookMessageLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
assert docs[0].metadata["subject"] == "Test for TIF files"
assert docs[0].metadata["sender"] == "Brian Zhou <nnheo@example.com>"
assert docs[0].metadata["date"] == "Mon, 18 Nov 2013 16:26:24 +0800"
assert docs[0].page_content == (
"This is a test email to experiment with the MS Outlook MSG "
"Extractor\r\n\r\n\r\n-- \r\n\r\n\r\nKind regards"
"\r\n\r\n\r\n\r\n\r\nBrian Zhou\r\n\r\n"
)
def test_unstructured_email_loader_with_attachments() -> None:
file_path = Path(__file__).parent.parent / "examples/fake-email-attachment.eml"
loader = UnstructuredEmailLoader(
str(file_path), mode="elements", process_attachments=True
)
docs = loader.load()
assert docs[-1].page_content == "Hey this is a fake attachment!"
assert docs[-1].metadata["filename"] == "fake-attachment.txt"
assert docs[-1].metadata["source"].endswith("fake-email-attachment.eml")
| true | from pathlib import Path
from langchain.document_loaders import OutlookMessageLoader, UnstructuredEmailLoader
def test_outlook_message_loader() -> None:
"""Test OutlookMessageLoader."""
file_path = Path(__file__).parent.parent / "examples/hello.msg"
loader = OutlookMessageLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
assert docs[0].metadata["subject"] == "Test for TIF files"
assert docs[0].metadata["sender"] == "Brian Zhou <PI:EMAIL:nnheo@example.comEND_PI>"
assert docs[0].metadata["date"] == "Mon, 18 Nov 2013 16:26:24 +0800"
assert docs[0].page_content == (
"This is a test email to experiment with the MS Outlook MSG "
"Extractor\r\n\r\n\r\n-- \r\n\r\n\r\nKind regards"
"\r\n\r\n\r\n\r\n\r\nBrian Zhou\r\n\r\n"
)
def test_unstructured_email_loader_with_attachments() -> None:
file_path = Path(__file__).parent.parent / "examples/fake-email-attachment.eml"
loader = UnstructuredEmailLoader(
str(file_path), mode="elements", process_attachments=True
)
docs = loader.load()
assert docs[-1].page_content == "Hey this is a fake attachment!"
assert docs[-1].metadata["filename"] == "fake-attachment.txt"
assert docs[-1].metadata["source"].endswith("fake-email-attachment.eml")
|
hf_public_repos/langchain-ai/langchain/docs/docs/integrations | hf_public_repos/langchain-ai/langchain/docs/docs/integrations/llms/opaqueprompts.ipynb | # install the opaqueprompts and langchain packages
! pip install opaqueprompts langchainimport os
# Set API keys
os.environ["OPAQUEPROMPTS_API_KEY"] = "<OPAQUEPROMPTS_API_KEY>"
os.environ["OPENAI_API_KEY"] = "<OPENAI_API_KEY>"from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferWindowMemory
from langchain.llms import OpaquePrompts
from langchain.globals import set_debug, set_verbose
set_debug(True)
set_verbose(True)
prompt_template = """
As an AI assistant, you will answer questions according to given context.
Sensitive personal information in the question is masked for privacy.
For instance, if the original text says "Giana is good," it will be changed
to "PERSON_998 is good."
Here's how to handle these changes:
* Consider these masked phrases just as placeholders, but still refer to
them in a relevant way when answering.
* It's possible that different masked terms might mean the same thing.
Stick with the given term and don't modify it.
* All masked terms follow the "TYPE_ID" pattern.
* Please don't invent new masked terms. For instance, if you see "PERSON_998,"
don't come up with "PERSON_997" or "PERSON_999" unless they're already in the question.
Conversation History: ```{history}```
Context : ```During our recent meeting on February 23, 2023, at 10:30 AM,
John Doe provided me with his personal details. His email is johndoe@example.com
and his contact number is 650-456-7890. He lives in New York City, USA, and
belongs to the American nationality with Christian beliefs and a leaning towards
the Democratic party. He mentioned that he recently made a transaction using his
credit card 4111 1111 1111 1111 and transferred bitcoins to the wallet address
1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa. While discussing his European travels, he noted
down his IBAN as GB29 NWBK 6016 1331 9268 19. Additionally, he provided his website
as https://johndoeportfolio.com. John also discussed some of his US-specific details.
He said his bank account number is 1234567890123456 and his drivers license is Y12345678.
His ITIN is 987-65-4321, and he recently renewed his passport, the number for which is
123456789. He emphasized not to share his SSN, which is 123-45-6789. Furthermore, he
mentioned that he accesses his work files remotely through the IP 192.168.1.1 and has
a medical license number MED-123456. ```
Question: ```{question}```
"""
chain = LLMChain(
prompt=PromptTemplate.from_template(prompt_template),
llm=OpaquePrompts(base_llm=OpenAI()),
memory=ConversationBufferWindowMemory(k=2),
verbose=True,
)
print(
chain.run(
{
"question": """Write a message to remind John to do password reset for his website to stay secure."""
},
callbacks=[StdOutCallbackHandler()],
)
)import langchain.utilities.opaqueprompts as op
from langchain.schema.runnable import RunnablePassthrough
from langchain.schema.output_parser import StrOutputParser
prompt = (PromptTemplate.from_template(prompt_template),)
llm = OpenAI()
pg_chain = (
op.sanitize
| RunnablePassthrough.assign(
response=(lambda x: x["sanitized_input"]) | prompt | llm | StrOutputParser(),
)
| (lambda x: op.desanitize(x["response"], x["secure_context"]))
)
pg_chain.invoke(
{
"question": "Write a text message to remind John to do password reset for his website through his email to stay secure.",
"history": "",
}
) | 0 | 3,621 | [{"tag": "EMAIL", "value": "johndoe@example.com", "start": 1524, "end": 1543}, {"tag": "IP_ADDRESS", "value": "192.168.1.1", "start": 2443, "end": 2454}] | true | 2 | # install the opaqueprompts and langchain packages
! pip install opaqueprompts langchainimport os
# Set API keys
os.environ["OPAQUEPROMPTS_API_KEY"] = "<OPAQUEPROMPTS_API_KEY>"
os.environ["OPENAI_API_KEY"] = "<OPENAI_API_KEY>"from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferWindowMemory
from langchain.llms import OpaquePrompts
from langchain.globals import set_debug, set_verbose
set_debug(True)
set_verbose(True)
prompt_template = """
As an AI assistant, you will answer questions according to given context.
Sensitive personal information in the question is masked for privacy.
For instance, if the original text says "Giana is good," it will be changed
to "PERSON_998 is good."
Here's how to handle these changes:
* Consider these masked phrases just as placeholders, but still refer to
them in a relevant way when answering.
* It's possible that different masked terms might mean the same thing.
Stick with the given term and don't modify it.
* All masked terms follow the "TYPE_ID" pattern.
* Please don't invent new masked terms. For instance, if you see "PERSON_998,"
don't come up with "PERSON_997" or "PERSON_999" unless they're already in the question.
Conversation History: ```{history}```
Context : ```During our recent meeting on February 23, 2023, at 10:30 AM,
John Doe provided me with his personal details. His email is kenaa@example.com
and his contact number is 650-456-7890. He lives in New York City, USA, and
belongs to the American nationality with Christian beliefs and a leaning towards
the Democratic party. He mentioned that he recently made a transaction using his
credit card 4111 1111 1111 1111 and transferred bitcoins to the wallet address
1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa. While discussing his European travels, he noted
down his IBAN as GB29 NWBK 6016 1331 9268 19. Additionally, he provided his website
as https://johndoeportfolio.com. John also discussed some of his US-specific details.
He said his bank account number is 1234567890123456 and his drivers license is Y12345678.
His ITIN is 987-65-4321, and he recently renewed his passport, the number for which is
123456789. He emphasized not to share his SSN, which is 123-45-6789. Furthermore, he
mentioned that he accesses his work files remotely through the IP 192.168.1.1 and has
a medical license number MED-123456. ```
Question: ```{question}```
"""
chain = LLMChain(
prompt=PromptTemplate.from_template(prompt_template),
llm=OpaquePrompts(base_llm=OpenAI()),
memory=ConversationBufferWindowMemory(k=2),
verbose=True,
)
print(
chain.run(
{
"question": """Write a message to remind John to do password reset for his website to stay secure."""
},
callbacks=[StdOutCallbackHandler()],
)
)import langchain.utilities.opaqueprompts as op
from langchain.schema.runnable import RunnablePassthrough
from langchain.schema.output_parser import StrOutputParser
prompt = (PromptTemplate.from_template(prompt_template),)
llm = OpenAI()
pg_chain = (
op.sanitize
| RunnablePassthrough.assign(
response=(lambda x: x["sanitized_input"]) | prompt | llm | StrOutputParser(),
)
| (lambda x: op.desanitize(x["response"], x["secure_context"]))
)
pg_chain.invoke(
{
"question": "Write a text message to remind John to do password reset for his website through his email to stay secure.",
"history": "",
}
) | true | # install the opaqueprompts and langchain packages
! pip install opaqueprompts langchainimport os
# Set API keys
os.environ["OPAQUEPROMPTS_API_KEY"] = "<OPAQUEPROMPTS_API_KEY>"
os.environ["OPENAI_API_KEY"] = "<OPENAI_API_KEY>"from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferWindowMemory
from langchain.llms import OpaquePrompts
from langchain.globals import set_debug, set_verbose
set_debug(True)
set_verbose(True)
prompt_template = """
As an AI assistant, you will answer questions according to given context.
Sensitive personal information in the question is masked for privacy.
For instance, if the original text says "Giana is good," it will be changed
to "PERSON_998 is good."
Here's how to handle these changes:
* Consider these masked phrases just as placeholders, but still refer to
them in a relevant way when answering.
* It's possible that different masked terms might mean the same thing.
Stick with the given term and don't modify it.
* All masked terms follow the "TYPE_ID" pattern.
* Please don't invent new masked terms. For instance, if you see "PERSON_998,"
don't come up with "PERSON_997" or "PERSON_999" unless they're already in the question.
Conversation History: ```{history}```
Context : ```During our recent meeting on February 23, 2023, at 10:30 AM,
John Doe provided me with his personal details. His email is PI:EMAIL:kenaa@example.comEND_PI
and his contact number is 650-456-7890. He lives in New York City, USA, and
belongs to the American nationality with Christian beliefs and a leaning towards
the Democratic party. He mentioned that he recently made a transaction using his
credit card 4111 1111 1111 1111 and transferred bitcoins to the wallet address
1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa. While discussing his European travels, he noted
down his IBAN as GB29 NWBK 6016 1331 9268 19. Additionally, he provided his website
as https://johndoeportfolio.com. John also discussed some of his US-specific details.
He said his bank account number is 1234567890123456 and his drivers license is Y12345678.
His ITIN is 987-65-4321, and he recently renewed his passport, the number for which is
123456789. He emphasized not to share his SSN, which is 123-45-6789. Furthermore, he
mentioned that he accesses his work files remotely through the IP 192.168.1.1 and has
a medical license number MED-123456. ```
Question: ```{question}```
"""
chain = LLMChain(
prompt=PromptTemplate.from_template(prompt_template),
llm=OpaquePrompts(base_llm=OpenAI()),
memory=ConversationBufferWindowMemory(k=2),
verbose=True,
)
print(
chain.run(
{
"question": """Write a message to remind John to do password reset for his website to stay secure."""
},
callbacks=[StdOutCallbackHandler()],
)
)import langchain.utilities.opaqueprompts as op
from langchain.schema.runnable import RunnablePassthrough
from langchain.schema.output_parser import StrOutputParser
prompt = (PromptTemplate.from_template(prompt_template),)
llm = OpenAI()
pg_chain = (
op.sanitize
| RunnablePassthrough.assign(
response=(lambda x: x["sanitized_input"]) | prompt | llm | StrOutputParser(),
)
| (lambda x: op.desanitize(x["response"], x["secure_context"]))
)
pg_chain.invoke(
{
"question": "Write a text message to remind John to do password reset for his website through his email to stay secure.",
"history": "",
}
) |
hf_public_repos/langchain-ai/langchain/docs/docs/guides/privacy | hf_public_repos/langchain-ai/langchain/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb | # Install necessary packages
# ! pip install langchain langchain-experimental openai presidio-analyzer presidio-anonymizer spacy Faker
# ! python -m spacy download en_core_web_lgfrom langchain_experimental.data_anonymizer import PresidioAnonymizer
anonymizer = PresidioAnonymizer()
anonymizer.anonymize(
"My name is Slim Shady, call me at 313-666-7440 or email me at real.slim.shady@gmail.com"
)# Set env var OPENAI_API_KEY or load from a .env file:
# import dotenv
# dotenv.load_dotenv()text = f"""Slim Shady recently lost his wallet.
Inside is some cash and his credit card with the number 4916 0387 9536 0861.
If you would find it, please call at 313-666-7440 or write an email here: real.slim.shady@gmail.com."""from langchain.prompts.prompt import PromptTemplate
from langchain.chat_models import ChatOpenAI
anonymizer = PresidioAnonymizer()
template = """Rewrite this text into an official, short email:
{anonymized_text}"""
prompt = PromptTemplate.from_template(template)
llm = ChatOpenAI(temperature=0)
chain = {"anonymized_text": anonymizer.anonymize} | prompt | llm
response = chain.invoke(text)
print(response.content)anonymizer = PresidioAnonymizer(analyzed_fields=["PERSON"])
anonymizer.anonymize(
"My name is Slim Shady, call me at 313-666-7440 or email me at real.slim.shady@gmail.com"
)anonymizer = PresidioAnonymizer(analyzed_fields=["PERSON", "PHONE_NUMBER"])
anonymizer.anonymize(
"My name is Slim Shady, call me at 313-666-7440 or email me at real.slim.shady@gmail.com"
)anonymizer = PresidioAnonymizer()
anonymizer.anonymize(
"My name is Slim Shady, call me at 313-666-7440 or email me at real.slim.shady@gmail.com"
)anonymizer = PresidioAnonymizer()
anonymizer.anonymize("My polish phone number is 666555444")# Define the regex pattern in a Presidio `Pattern` object:
from presidio_analyzer import Pattern, PatternRecognizer
polish_phone_numbers_pattern = Pattern(
name="polish_phone_numbers_pattern",
regex="(?<!\w)(\(?(\+|00)?48\)?)?[ -]?\d{3}[ -]?\d{3}[ -]?\d{3}(?!\w)",
score=1,
)
# Define the recognizer with one or more patterns
polish_phone_numbers_recognizer = PatternRecognizer(
supported_entity="POLISH_PHONE_NUMBER", patterns=[polish_phone_numbers_pattern]
)anonymizer.add_recognizer(polish_phone_numbers_recognizer)print(anonymizer.anonymize("My polish phone number is 666555444"))
print(anonymizer.anonymize("My polish phone number is 666 555 444"))
print(anonymizer.anonymize("My polish phone number is +48 666 555 444"))from faker import Faker
fake = Faker(locale="pl_PL")
def fake_polish_phone_number(_=None):
return fake.phone_number()
fake_polish_phone_number()from presidio_anonymizer.entities import OperatorConfig
new_operators = {
"POLISH_PHONE_NUMBER": OperatorConfig(
"custom", {"lambda": fake_polish_phone_number}
)
}anonymizer.add_operators(new_operators)anonymizer.anonymize("My polish phone number is 666555444")print(anonymizer.anonymize("My name is John Doe. Hi John Doe!"))
print(anonymizer.anonymize("My name is John Doe. Hi John Doe!"))from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer_with_memory = PresidioReversibleAnonymizer()
print(anonymizer_with_memory.anonymize("My name is John Doe. Hi John Doe!"))
print(anonymizer_with_memory.anonymize("My name is John Doe. Hi John Doe!")) | 0 | 4,001 | [{"tag": "EMAIL", "value": "real.slim.shady@gmail.com", "start": 373, "end": 398}, {"tag": "EMAIL", "value": "real.slim.shady@gmail.com", "start": 1292, "end": 1317}, {"tag": "EMAIL", "value": "real.slim.shady@gmail.com", "start": 1485, "end": 1510}, {"tag": "EMAIL", "value": "real.slim.shady@gmail.com", "start": 1636, "end": 1661}] | true | 4 | # Install necessary packages
# ! pip install langchain langchain-experimental openai presidio-analyzer presidio-anonymizer spacy Faker
# ! python -m spacy download en_core_web_lgfrom langchain_experimental.data_anonymizer import PresidioAnonymizer
anonymizer = PresidioAnonymizer()
anonymizer.anonymize(
"My name is Slim Shady, call me at 313-666-7440 or email me at upchh@example.com"
)# Set env var OPENAI_API_KEY or load from a .env file:
# import dotenv
# dotenv.load_dotenv()text = f"""Slim Shady recently lost his wallet.
Inside is some cash and his credit card with the number 4916 0387 9536 0861.
If you would find it, please call at 313-666-7440 or write an email here: real.slim.shady@gmail.com."""from langchain.prompts.prompt import PromptTemplate
from langchain.chat_models import ChatOpenAI
anonymizer = PresidioAnonymizer()
template = """Rewrite this text into an official, short email:
{anonymized_text}"""
prompt = PromptTemplate.from_template(template)
llm = ChatOpenAI(temperature=0)
chain = {"anonymized_text": anonymizer.anonymize} | prompt | llm
response = chain.invoke(text)
print(response.content)anonymizer = PresidioAnonymizer(analyzed_fields=["PERSON"])
anonymizer.anonymize(
"My name is Slim Shady, call me at 313-666-7440 or email me at upchh@example.com"
)anonymizer = PresidioAnonymizer(analyzed_fields=["PERSON", "PHONE_NUMBER"])
anonymizer.anonymize(
"My name is Slim Shady, call me at 313-666-7440 or email me at upchh@example.com"
)anonymizer = PresidioAnonymizer()
anonymizer.anonymize(
"My name is Slim Shady, call me at 313-666-7440 or email me at upchh@example.com"
)anonymizer = PresidioAnonymizer()
anonymizer.anonymize("My polish phone number is 666555444")# Define the regex pattern in a Presidio `Pattern` object:
from presidio_analyzer import Pattern, PatternRecognizer
polish_phone_numbers_pattern = Pattern(
name="polish_phone_numbers_pattern",
regex="(?<!\w)(\(?(\+|00)?48\)?)?[ -]?\d{3}[ -]?\d{3}[ -]?\d{3}(?!\w)",
score=1,
)
# Define the recognizer with one or more patterns
polish_phone_numbers_recognizer = PatternRecognizer(
supported_entity="POLISH_PHONE_NUMBER", patterns=[polish_phone_numbers_pattern]
)anonymizer.add_recognizer(polish_phone_numbers_recognizer)print(anonymizer.anonymize("My polish phone number is 666555444"))
print(anonymizer.anonymize("My polish phone number is 666 555 444"))
print(anonymizer.anonymize("My polish phone number is +48 666 555 444"))from faker import Faker
fake = Faker(locale="pl_PL")
def fake_polish_phone_number(_=None):
return fake.phone_number()
fake_polish_phone_number()from presidio_anonymizer.entities import OperatorConfig
new_operators = {
"POLISH_PHONE_NUMBER": OperatorConfig(
"custom", {"lambda": fake_polish_phone_number}
)
}anonymizer.add_operators(new_operators)anonymizer.anonymize("My polish phone number is 666555444")print(anonymizer.anonymize("My name is John Doe. Hi John Doe!"))
print(anonymizer.anonymize("My name is John Doe. Hi John Doe!"))from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer_with_memory = PresidioReversibleAnonymizer()
print(anonymizer_with_memory.anonymize("My name is John Doe. Hi John Doe!"))
print(anonymizer_with_memory.anonymize("My name is John Doe. Hi John Doe!")) | true | # Install necessary packages
# ! pip install langchain langchain-experimental openai presidio-analyzer presidio-anonymizer spacy Faker
# ! python -m spacy download en_core_web_lgfrom langchain_experimental.data_anonymizer import PresidioAnonymizer
anonymizer = PresidioAnonymizer()
anonymizer.anonymize(
"My name is Slim Shady, call me at 313-666-7440 or email me at PI:EMAIL:upchh@example.comEND_PI"
)# Set env var OPENAI_API_KEY or load from a .env file:
# import dotenv
# dotenv.load_dotenv()text = f"""Slim Shady recently lost his wallet.
Inside is some cash and his credit card with the number 4916 0387 9536 0861.
If you would find it, please call at 313-666-7440 or write an email here: real.slim.shady@gmail.com."""from langchain.prompts.prompt import PromptTemplate
from langchain.chat_models import ChatOpenAI
anonymizer = PresidioAnonymizer()
template = """Rewrite this text into an official, short email:
{anonymized_text}"""
prompt = PromptTemplate.from_template(template)
llm = ChatOpenAI(temperature=0)
chain = {"anonymized_text": anonymizer.anonymize} | prompt | llm
response = chain.invoke(text)
print(response.content)anonymizer = PresidioAnonymizer(analyzed_fields=["PERSON"])
anonymizer.anonymize(
"My name is Slim Shady, call me at 313-666-7440 or email me at PI:EMAIL:upchh@example.comEND_PI"
)anonymizer = PresidioAnonymizer(analyzed_fields=["PERSON", "PHONE_NUMBER"])
anonymizer.anonymize(
"My name is Slim Shady, call me at 313-666-7440 or email me at PI:EMAIL:upchh@example.comEND_PI"
)anonymizer = PresidioAnonymizer()
anonymizer.anonymize(
"My name is Slim Shady, call me at 313-666-7440 or email me at PI:EMAIL:upchh@example.comEND_PI"
)anonymizer = PresidioAnonymizer()
anonymizer.anonymize("My polish phone number is 666555444")# Define the regex pattern in a Presidio `Pattern` object:
from presidio_analyzer import Pattern, PatternRecognizer
polish_phone_numbers_pattern = Pattern(
name="polish_phone_numbers_pattern",
regex="(?<!\w)(\(?(\+|00)?48\)?)?[ -]?\d{3}[ -]?\d{3}[ -]?\d{3}(?!\w)",
score=1,
)
# Define the recognizer with one or more patterns
polish_phone_numbers_recognizer = PatternRecognizer(
supported_entity="POLISH_PHONE_NUMBER", patterns=[polish_phone_numbers_pattern]
)anonymizer.add_recognizer(polish_phone_numbers_recognizer)print(anonymizer.anonymize("My polish phone number is 666555444"))
print(anonymizer.anonymize("My polish phone number is 666 555 444"))
print(anonymizer.anonymize("My polish phone number is +48 666 555 444"))from faker import Faker
fake = Faker(locale="pl_PL")
def fake_polish_phone_number(_=None):
return fake.phone_number()
fake_polish_phone_number()from presidio_anonymizer.entities import OperatorConfig
new_operators = {
"POLISH_PHONE_NUMBER": OperatorConfig(
"custom", {"lambda": fake_polish_phone_number}
)
}anonymizer.add_operators(new_operators)anonymizer.anonymize("My polish phone number is 666555444")print(anonymizer.anonymize("My name is John Doe. Hi John Doe!"))
print(anonymizer.anonymize("My name is John Doe. Hi John Doe!"))from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer_with_memory = PresidioReversibleAnonymizer()
print(anonymizer_with_memory.anonymize("My name is John Doe. Hi John Doe!"))
print(anonymizer_with_memory.anonymize("My name is John Doe. Hi John Doe!")) |
hf_public_repos/langchain-ai/langchain/docs/docs/guides/privacy | hf_public_repos/langchain-ai/langchain/docs/docs/guides/privacy/presidio_data_anonymization/qa_privacy_protection.ipynb | # Install necessary packages
# !pip install langchain langchain-experimental openai presidio-analyzer presidio-anonymizer spacy Faker faiss-cpu tiktoken
# ! python -m spacy download en_core_web_lgdocument_content = """Date: October 19, 2021
Witness: John Doe
Subject: Testimony Regarding the Loss of Wallet
Testimony Content:
Hello Officer,
My name is John Doe and on October 19, 2021, my wallet was stolen in the vicinity of Kilmarnock during a bike trip. This wallet contains some very important things to me.
Firstly, the wallet contains my credit card with number 4111 1111 1111 1111, which is registered under my name and linked to my bank account, PL61109010140000071219812874.
Additionally, the wallet had a driver's license - DL No: 999000680 issued to my name. It also houses my Social Security Number, 602-76-4532.
What's more, I had my polish identity card there, with the number ABC123456.
I would like this data to be secured and protected in all possible ways. I believe It was stolen at 9:30 AM.
In case any information arises regarding my wallet, please reach out to me on my phone number, 999-888-7777, or through my personal email, johndoe@example.com.
Please consider this information to be highly confidential and respect my privacy.
The bank has been informed about the stolen credit card and necessary actions have been taken from their end. They will be reachable at their official email, support@bankname.com.
My representative there is Victoria Cherry (her business phone: 987-654-3210).
Thank you for your assistance,
John Doe"""from langchain.schema import Document
documents = [Document(page_content=document_content)]# Util function for coloring the PII markers
# NOTE: It will not be visible on documentation page, only in the notebook
import re
def print_colored_pii(string):
colored_string = re.sub(
r"(<[^>]*>)", lambda m: "\033[31m" + m.group(1) + "\033[0m", string
)
print(colored_string)from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(
add_default_faker_operators=False,
)
print_colored_pii(anonymizer.anonymize(document_content))import pprint
pprint.pprint(anonymizer.deanonymizer_mapping)# Define the regex pattern in a Presidio `Pattern` object:
from presidio_analyzer import Pattern, PatternRecognizer
polish_id_pattern = Pattern(
name="polish_id_pattern",
regex="[A-Z]{3}\d{6}",
score=1,
)
time_pattern = Pattern(
name="time_pattern",
regex="(1[0-2]|0?[1-9]):[0-5][0-9] (AM|PM)",
score=1,
)
# Define the recognizer with one or more patterns
polish_id_recognizer = PatternRecognizer(
supported_entity="POLISH_ID", patterns=[polish_id_pattern]
)
time_recognizer = PatternRecognizer(supported_entity="TIME", patterns=[time_pattern])anonymizer.add_recognizer(polish_id_recognizer)
anonymizer.add_recognizer(time_recognizer)anonymizer.reset_deanonymizer_mapping()print_colored_pii(anonymizer.anonymize(document_content))pprint.pprint(anonymizer.deanonymizer_mapping)anonymizer = PresidioReversibleAnonymizer(
add_default_faker_operators=True,
# Faker seed is used here to make sure the same fake data is generated for the test purposes
# In production, it is recommended to remove the faker_seed parameter (it will default to None)
faker_seed=42,
)
anonymizer.add_recognizer(polish_id_recognizer)
anonymizer.add_recognizer(time_recognizer)
print_colored_pii(anonymizer.anonymize(document_content))from faker import Faker
fake = Faker()
def fake_polish_id(_=None):
return fake.bothify(text="???######").upper()
fake_polish_id()def fake_time(_=None):
return fake.time(pattern="%I:%M %p")
fake_time()from presidio_anonymizer.entities import OperatorConfig
new_operators = {
"POLISH_ID": OperatorConfig("custom", {"lambda": fake_polish_id}),
"TIME": OperatorConfig("custom", {"lambda": fake_time}),
}
anonymizer.add_operators(new_operators)anonymizer.reset_deanonymizer_mapping()
print_colored_pii(anonymizer.anonymize(document_content))pprint.pprint(anonymizer.deanonymizer_mapping)# 1. Initialize anonymizer
anonymizer = PresidioReversibleAnonymizer(
# Faker seed is used here to make sure the same fake data is generated for the test purposes
# In production, it is recommended to remove the faker_seed parameter (it will default to None)
faker_seed=42,
)
anonymizer.add_recognizer(polish_id_recognizer)
anonymizer.add_recognizer(time_recognizer)
anonymizer.add_operators(new_operators)from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
# 2. Load the data: In our case data's already loaded
# 3. Anonymize the data before indexing
for doc in documents:
doc.page_content = anonymizer.anonymize(doc.page_content)
# 4. Split the documents into chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
chunks = text_splitter.split_documents(documents)
# 5. Index the chunks (using OpenAI embeddings, because the data is already anonymized)
embeddings = OpenAIEmbeddings()
docsearch = FAISS.from_documents(chunks, embeddings)
retriever = docsearch.as_retriever()from operator import itemgetter
from langchain.chat_models.openai import ChatOpenAI
from langchain.schema.runnable import RunnableMap
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough
from langchain.schema.runnable import RunnableLambda
# 6. Create anonymizer chain
template = """Answer the question based only on the following context:
{context}
Question: {anonymized_question}
"""
prompt = ChatPromptTemplate.from_template(template)
model = ChatOpenAI(temperature=0.3)
_inputs = RunnableMap(
question=RunnablePassthrough(),
# It is important to remember about question anonymization
anonymized_question=RunnableLambda(anonymizer.anonymize),
)
anonymizer_chain = (
_inputs
| {
"context": itemgetter("anonymized_question") | retriever,
"anonymized_question": itemgetter("anonymized_question"),
}
| prompt
| model
| StrOutputParser()
)anonymizer_chain.invoke(
"Where did the theft of the wallet occur, at what time, and who was it stolen from?"
)# 7. Add deanonymization step to the chain
chain_with_deanonymization = anonymizer_chain | RunnableLambda(anonymizer.deanonymize)
print(
chain_with_deanonymization.invoke(
"Where did the theft of the wallet occur, at what time, and who was it stolen from?"
)
)print(
chain_with_deanonymization.invoke("What was the content of the wallet in detail?")
)print(chain_with_deanonymization.invoke("Whose phone number is it: 999-888-7777?"))anonymizer = PresidioReversibleAnonymizer(
# Faker seed is used here to make sure the same fake data is generated for the test purposes
# In production, it is recommended to remove the faker_seed parameter (it will default to None)
faker_seed=42,
)
anonymizer.add_recognizer(polish_id_recognizer)
anonymizer.add_recognizer(time_recognizer)
anonymizer.add_operators(new_operators)from langchain.embeddings import HuggingFaceBgeEmbeddings
model_name = "BAAI/bge-base-en-v1.5"
# model_kwargs = {'device': 'cuda'}
encode_kwargs = {"normalize_embeddings": True} # set True to compute cosine similarity
local_embeddings = HuggingFaceBgeEmbeddings(
model_name=model_name,
# model_kwargs=model_kwargs,
encode_kwargs=encode_kwargs,
query_instruction="Represent this sentence for searching relevant passages:",
)documents = [Document(page_content=document_content)]
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
chunks = text_splitter.split_documents(documents)
docsearch = FAISS.from_documents(chunks, local_embeddings)
retriever = docsearch.as_retriever()template = """Answer the question based only on the following context:
{context}
Question: {anonymized_question}
"""
prompt = ChatPromptTemplate.from_template(template)
model = ChatOpenAI(temperature=0.2)from langchain.prompts.prompt import PromptTemplate
from langchain.schema import format_document
DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template="{page_content}")
def _combine_documents(
docs, document_prompt=DEFAULT_DOCUMENT_PROMPT, document_separator="\n\n"
):
doc_strings = [format_document(doc, document_prompt) for doc in docs]
return document_separator.join(doc_strings)
chain_with_deanonymization = (
RunnableMap({"question": RunnablePassthrough()})
| {
"context": itemgetter("question")
| retriever
| _combine_documents
| anonymizer.anonymize,
"anonymized_question": lambda x: anonymizer.anonymize(x["question"]),
}
| prompt
| model
| StrOutputParser()
| RunnableLambda(anonymizer.deanonymize)
)print(
chain_with_deanonymization.invoke(
"Where did the theft of the wallet occur, at what time, and who was it stolen from?"
)
)print(
chain_with_deanonymization.invoke("What was the content of the wallet in detail?")
)print(chain_with_deanonymization.invoke("Whose phone number is it: 999-888-7777?")) | 0 | 4,003 | [{"tag": "EMAIL", "value": "johndoe@example.com", "start": 1169, "end": 1188}, {"tag": "EMAIL", "value": "support@bankname.com", "start": 1436, "end": 1456}] | true | 2 | # Install necessary packages
# !pip install langchain langchain-experimental openai presidio-analyzer presidio-anonymizer spacy Faker faiss-cpu tiktoken
# ! python -m spacy download en_core_web_lgdocument_content = """Date: October 19, 2021
Witness: John Doe
Subject: Testimony Regarding the Loss of Wallet
Testimony Content:
Hello Officer,
My name is John Doe and on October 19, 2021, my wallet was stolen in the vicinity of Kilmarnock during a bike trip. This wallet contains some very important things to me.
Firstly, the wallet contains my credit card with number 4111 1111 1111 1111, which is registered under my name and linked to my bank account, PL61109010140000071219812874.
Additionally, the wallet had a driver's license - DL No: 999000680 issued to my name. It also houses my Social Security Number, 602-76-4532.
What's more, I had my polish identity card there, with the number ABC123456.
I would like this data to be secured and protected in all possible ways. I believe It was stolen at 9:30 AM.
In case any information arises regarding my wallet, please reach out to me on my phone number, 999-888-7777, or through my personal email, envkt@example.com.
Please consider this information to be highly confidential and respect my privacy.
The bank has been informed about the stolen credit card and necessary actions have been taken from their end. They will be reachable at their official email, anpch@example.com.
My representative there is Victoria Cherry (her business phone: 987-654-3210).
Thank you for your assistance,
John Doe"""from langchain.schema import Document
documents = [Document(page_content=document_content)]# Util function for coloring the PII markers
# NOTE: It will not be visible on documentation page, only in the notebook
import re
def print_colored_pii(string):
colored_string = re.sub(
r"(<[^>]*>)", lambda m: "\033[31m" + m.group(1) + "\033[0m", string
)
print(colored_string)from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(
add_default_faker_operators=False,
)
print_colored_pii(anonymizer.anonymize(document_content))import pprint
pprint.pprint(anonymizer.deanonymizer_mapping)# Define the regex pattern in a Presidio `Pattern` object:
from presidio_analyzer import Pattern, PatternRecognizer
polish_id_pattern = Pattern(
name="polish_id_pattern",
regex="[A-Z]{3}\d{6}",
score=1,
)
time_pattern = Pattern(
name="time_pattern",
regex="(1[0-2]|0?[1-9]):[0-5][0-9] (AM|PM)",
score=1,
)
# Define the recognizer with one or more patterns
polish_id_recognizer = PatternRecognizer(
supported_entity="POLISH_ID", patterns=[polish_id_pattern]
)
time_recognizer = PatternRecognizer(supported_entity="TIME", patterns=[time_pattern])anonymizer.add_recognizer(polish_id_recognizer)
anonymizer.add_recognizer(time_recognizer)anonymizer.reset_deanonymizer_mapping()print_colored_pii(anonymizer.anonymize(document_content))pprint.pprint(anonymizer.deanonymizer_mapping)anonymizer = PresidioReversibleAnonymizer(
add_default_faker_operators=True,
# Faker seed is used here to make sure the same fake data is generated for the test purposes
# In production, it is recommended to remove the faker_seed parameter (it will default to None)
faker_seed=42,
)
anonymizer.add_recognizer(polish_id_recognizer)
anonymizer.add_recognizer(time_recognizer)
print_colored_pii(anonymizer.anonymize(document_content))from faker import Faker
fake = Faker()
def fake_polish_id(_=None):
return fake.bothify(text="???######").upper()
fake_polish_id()def fake_time(_=None):
return fake.time(pattern="%I:%M %p")
fake_time()from presidio_anonymizer.entities import OperatorConfig
new_operators = {
"POLISH_ID": OperatorConfig("custom", {"lambda": fake_polish_id}),
"TIME": OperatorConfig("custom", {"lambda": fake_time}),
}
anonymizer.add_operators(new_operators)anonymizer.reset_deanonymizer_mapping()
print_colored_pii(anonymizer.anonymize(document_content))pprint.pprint(anonymizer.deanonymizer_mapping)# 1. Initialize anonymizer
anonymizer = PresidioReversibleAnonymizer(
# Faker seed is used here to make sure the same fake data is generated for the test purposes
# In production, it is recommended to remove the faker_seed parameter (it will default to None)
faker_seed=42,
)
anonymizer.add_recognizer(polish_id_recognizer)
anonymizer.add_recognizer(time_recognizer)
anonymizer.add_operators(new_operators)from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
# 2. Load the data: In our case data's already loaded
# 3. Anonymize the data before indexing
for doc in documents:
doc.page_content = anonymizer.anonymize(doc.page_content)
# 4. Split the documents into chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
chunks = text_splitter.split_documents(documents)
# 5. Index the chunks (using OpenAI embeddings, because the data is already anonymized)
embeddings = OpenAIEmbeddings()
docsearch = FAISS.from_documents(chunks, embeddings)
retriever = docsearch.as_retriever()from operator import itemgetter
from langchain.chat_models.openai import ChatOpenAI
from langchain.schema.runnable import RunnableMap
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough
from langchain.schema.runnable import RunnableLambda
# 6. Create anonymizer chain
template = """Answer the question based only on the following context:
{context}
Question: {anonymized_question}
"""
prompt = ChatPromptTemplate.from_template(template)
model = ChatOpenAI(temperature=0.3)
_inputs = RunnableMap(
question=RunnablePassthrough(),
# It is important to remember about question anonymization
anonymized_question=RunnableLambda(anonymizer.anonymize),
)
anonymizer_chain = (
_inputs
| {
"context": itemgetter("anonymized_question") | retriever,
"anonymized_question": itemgetter("anonymized_question"),
}
| prompt
| model
| StrOutputParser()
)anonymizer_chain.invoke(
"Where did the theft of the wallet occur, at what time, and who was it stolen from?"
)# 7. Add deanonymization step to the chain
chain_with_deanonymization = anonymizer_chain | RunnableLambda(anonymizer.deanonymize)
print(
chain_with_deanonymization.invoke(
"Where did the theft of the wallet occur, at what time, and who was it stolen from?"
)
)print(
chain_with_deanonymization.invoke("What was the content of the wallet in detail?")
)print(chain_with_deanonymization.invoke("Whose phone number is it: 999-888-7777?"))anonymizer = PresidioReversibleAnonymizer(
# Faker seed is used here to make sure the same fake data is generated for the test purposes
# In production, it is recommended to remove the faker_seed parameter (it will default to None)
faker_seed=42,
)
anonymizer.add_recognizer(polish_id_recognizer)
anonymizer.add_recognizer(time_recognizer)
anonymizer.add_operators(new_operators)from langchain.embeddings import HuggingFaceBgeEmbeddings
model_name = "BAAI/bge-base-en-v1.5"
# model_kwargs = {'device': 'cuda'}
encode_kwargs = {"normalize_embeddings": True} # set True to compute cosine similarity
local_embeddings = HuggingFaceBgeEmbeddings(
model_name=model_name,
# model_kwargs=model_kwargs,
encode_kwargs=encode_kwargs,
query_instruction="Represent this sentence for searching relevant passages:",
)documents = [Document(page_content=document_content)]
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
chunks = text_splitter.split_documents(documents)
docsearch = FAISS.from_documents(chunks, local_embeddings)
retriever = docsearch.as_retriever()template = """Answer the question based only on the following context:
{context}
Question: {anonymized_question}
"""
prompt = ChatPromptTemplate.from_template(template)
model = ChatOpenAI(temperature=0.2)from langchain.prompts.prompt import PromptTemplate
from langchain.schema import format_document
DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template="{page_content}")
def _combine_documents(
docs, document_prompt=DEFAULT_DOCUMENT_PROMPT, document_separator="\n\n"
):
doc_strings = [format_document(doc, document_prompt) for doc in docs]
return document_separator.join(doc_strings)
chain_with_deanonymization = (
RunnableMap({"question": RunnablePassthrough()})
| {
"context": itemgetter("question")
| retriever
| _combine_documents
| anonymizer.anonymize,
"anonymized_question": lambda x: anonymizer.anonymize(x["question"]),
}
| prompt
| model
| StrOutputParser()
| RunnableLambda(anonymizer.deanonymize)
)print(
chain_with_deanonymization.invoke(
"Where did the theft of the wallet occur, at what time, and who was it stolen from?"
)
)print(
chain_with_deanonymization.invoke("What was the content of the wallet in detail?")
)print(chain_with_deanonymization.invoke("Whose phone number is it: 999-888-7777?")) | true | # Install necessary packages
# !pip install langchain langchain-experimental openai presidio-analyzer presidio-anonymizer spacy Faker faiss-cpu tiktoken
# ! python -m spacy download en_core_web_lgdocument_content = """Date: October 19, 2021
Witness: John Doe
Subject: Testimony Regarding the Loss of Wallet
Testimony Content:
Hello Officer,
My name is John Doe and on October 19, 2021, my wallet was stolen in the vicinity of Kilmarnock during a bike trip. This wallet contains some very important things to me.
Firstly, the wallet contains my credit card with number 4111 1111 1111 1111, which is registered under my name and linked to my bank account, PL61109010140000071219812874.
Additionally, the wallet had a driver's license - DL No: 999000680 issued to my name. It also houses my Social Security Number, 602-76-4532.
What's more, I had my polish identity card there, with the number ABC123456.
I would like this data to be secured and protected in all possible ways. I believe It was stolen at 9:30 AM.
In case any information arises regarding my wallet, please reach out to me on my phone number, 999-888-7777, or through my personal email, PI:EMAIL:envkt@example.comEND_PI.
Please consider this information to be highly confidential and respect my privacy.
The bank has been informed about the stolen credit card and necessary actions have been taken from their end. They will be reachable at their official email, PI:EMAIL:anpch@example.comEND_PI.
My representative there is Victoria Cherry (her business phone: 987-654-3210).
Thank you for your assistance,
John Doe"""from langchain.schema import Document
documents = [Document(page_content=document_content)]# Util function for coloring the PII markers
# NOTE: It will not be visible on documentation page, only in the notebook
import re
def print_colored_pii(string):
colored_string = re.sub(
r"(<[^>]*>)", lambda m: "\033[31m" + m.group(1) + "\033[0m", string
)
print(colored_string)from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(
add_default_faker_operators=False,
)
print_colored_pii(anonymizer.anonymize(document_content))import pprint
pprint.pprint(anonymizer.deanonymizer_mapping)# Define the regex pattern in a Presidio `Pattern` object:
from presidio_analyzer import Pattern, PatternRecognizer
polish_id_pattern = Pattern(
name="polish_id_pattern",
regex="[A-Z]{3}\d{6}",
score=1,
)
time_pattern = Pattern(
name="time_pattern",
regex="(1[0-2]|0?[1-9]):[0-5][0-9] (AM|PM)",
score=1,
)
# Define the recognizer with one or more patterns
polish_id_recognizer = PatternRecognizer(
supported_entity="POLISH_ID", patterns=[polish_id_pattern]
)
time_recognizer = PatternRecognizer(supported_entity="TIME", patterns=[time_pattern])anonymizer.add_recognizer(polish_id_recognizer)
anonymizer.add_recognizer(time_recognizer)anonymizer.reset_deanonymizer_mapping()print_colored_pii(anonymizer.anonymize(document_content))pprint.pprint(anonymizer.deanonymizer_mapping)anonymizer = PresidioReversibleAnonymizer(
add_default_faker_operators=True,
# Faker seed is used here to make sure the same fake data is generated for the test purposes
# In production, it is recommended to remove the faker_seed parameter (it will default to None)
faker_seed=42,
)
anonymizer.add_recognizer(polish_id_recognizer)
anonymizer.add_recognizer(time_recognizer)
print_colored_pii(anonymizer.anonymize(document_content))from faker import Faker
fake = Faker()
def fake_polish_id(_=None):
return fake.bothify(text="???######").upper()
fake_polish_id()def fake_time(_=None):
return fake.time(pattern="%I:%M %p")
fake_time()from presidio_anonymizer.entities import OperatorConfig
new_operators = {
"POLISH_ID": OperatorConfig("custom", {"lambda": fake_polish_id}),
"TIME": OperatorConfig("custom", {"lambda": fake_time}),
}
anonymizer.add_operators(new_operators)anonymizer.reset_deanonymizer_mapping()
print_colored_pii(anonymizer.anonymize(document_content))pprint.pprint(anonymizer.deanonymizer_mapping)# 1. Initialize anonymizer
anonymizer = PresidioReversibleAnonymizer(
# Faker seed is used here to make sure the same fake data is generated for the test purposes
# In production, it is recommended to remove the faker_seed parameter (it will default to None)
faker_seed=42,
)
anonymizer.add_recognizer(polish_id_recognizer)
anonymizer.add_recognizer(time_recognizer)
anonymizer.add_operators(new_operators)from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
# 2. Load the data: In our case data's already loaded
# 3. Anonymize the data before indexing
for doc in documents:
doc.page_content = anonymizer.anonymize(doc.page_content)
# 4. Split the documents into chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
chunks = text_splitter.split_documents(documents)
# 5. Index the chunks (using OpenAI embeddings, because the data is already anonymized)
embeddings = OpenAIEmbeddings()
docsearch = FAISS.from_documents(chunks, embeddings)
retriever = docsearch.as_retriever()from operator import itemgetter
from langchain.chat_models.openai import ChatOpenAI
from langchain.schema.runnable import RunnableMap
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough
from langchain.schema.runnable import RunnableLambda
# 6. Create anonymizer chain
template = """Answer the question based only on the following context:
{context}
Question: {anonymized_question}
"""
prompt = ChatPromptTemplate.from_template(template)
model = ChatOpenAI(temperature=0.3)
_inputs = RunnableMap(
question=RunnablePassthrough(),
# It is important to remember about question anonymization
anonymized_question=RunnableLambda(anonymizer.anonymize),
)
anonymizer_chain = (
_inputs
| {
"context": itemgetter("anonymized_question") | retriever,
"anonymized_question": itemgetter("anonymized_question"),
}
| prompt
| model
| StrOutputParser()
)anonymizer_chain.invoke(
"Where did the theft of the wallet occur, at what time, and who was it stolen from?"
)# 7. Add deanonymization step to the chain
chain_with_deanonymization = anonymizer_chain | RunnableLambda(anonymizer.deanonymize)
print(
chain_with_deanonymization.invoke(
"Where did the theft of the wallet occur, at what time, and who was it stolen from?"
)
)print(
chain_with_deanonymization.invoke("What was the content of the wallet in detail?")
)print(chain_with_deanonymization.invoke("Whose phone number is it: 999-888-7777?"))anonymizer = PresidioReversibleAnonymizer(
# Faker seed is used here to make sure the same fake data is generated for the test purposes
# In production, it is recommended to remove the faker_seed parameter (it will default to None)
faker_seed=42,
)
anonymizer.add_recognizer(polish_id_recognizer)
anonymizer.add_recognizer(time_recognizer)
anonymizer.add_operators(new_operators)from langchain.embeddings import HuggingFaceBgeEmbeddings
model_name = "BAAI/bge-base-en-v1.5"
# model_kwargs = {'device': 'cuda'}
encode_kwargs = {"normalize_embeddings": True} # set True to compute cosine similarity
local_embeddings = HuggingFaceBgeEmbeddings(
model_name=model_name,
# model_kwargs=model_kwargs,
encode_kwargs=encode_kwargs,
query_instruction="Represent this sentence for searching relevant passages:",
)documents = [Document(page_content=document_content)]
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
chunks = text_splitter.split_documents(documents)
docsearch = FAISS.from_documents(chunks, local_embeddings)
retriever = docsearch.as_retriever()template = """Answer the question based only on the following context:
{context}
Question: {anonymized_question}
"""
prompt = ChatPromptTemplate.from_template(template)
model = ChatOpenAI(temperature=0.2)from langchain.prompts.prompt import PromptTemplate
from langchain.schema import format_document
DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template="{page_content}")
def _combine_documents(
docs, document_prompt=DEFAULT_DOCUMENT_PROMPT, document_separator="\n\n"
):
doc_strings = [format_document(doc, document_prompt) for doc in docs]
return document_separator.join(doc_strings)
chain_with_deanonymization = (
RunnableMap({"question": RunnablePassthrough()})
| {
"context": itemgetter("question")
| retriever
| _combine_documents
| anonymizer.anonymize,
"anonymized_question": lambda x: anonymizer.anonymize(x["question"]),
}
| prompt
| model
| StrOutputParser()
| RunnableLambda(anonymizer.deanonymize)
)print(
chain_with_deanonymization.invoke(
"Where did the theft of the wallet occur, at what time, and who was it stolen from?"
)
)print(
chain_with_deanonymization.invoke("What was the content of the wallet in detail?")
)print(chain_with_deanonymization.invoke("Whose phone number is it: 999-888-7777?")) |
hf_public_repos/davila7/langchain-101 | hf_public_repos/davila7/langchain-101/functions_callings/talk_send_message_vanilla.py | import streamlit as st
from bokeh.models.widgets import Button
from bokeh.models import CustomJS
from streamlit_bokeh_events import streamlit_bokeh_events
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
import openai
import json
import os
from dotenv import load_dotenv
load_dotenv()
def send_email(email, subject, body):
"""send the user an email with the answer"""
try:
if(subject == ''):
subject = 'GPT Email'
message = Mail(
# add the email connected to your sendgrid code here
from_email='daniel@judini.ai',
to_emails=email,
subject=subject,
html_content=body
)
st.write(message)
sg = SendGridAPIClient(os.getenv("SENDGRID_API_KEY"))
response = sg.send(message)
st.write(response.status_code)
st.write(response.body)
st.write(response.headers)
except Exception as e:
print(f"An error occurred: {str(e)}")
st.title('GPT Sends Emails')
st.write('Instructions:')
st.write("Click on the 'Start Talking' button and allow the browser permission to use the microphone. Say a sentence requesting to send an email with a message. You must say the person's full email address.")
st.write("Example: Send an email to dan.avila7@gmail.com reminding him that he must study the OpenAI Functions API for tomorrow's exam")
user_secret = st.text_input(label = ":blue[OpenAI API key]",
value="",
placeholder = "Paste your openAI API key, sk-",
type = "password")
if(user_secret):
stt_button = Button(label="Start talking", button_type="success")
stt_button.js_on_event("button_click", CustomJS(code="""
var recognition = new webkitSpeechRecognition();
recognition.continuous = true;
recognition.interimResults = true;
recognition.onresult = function (e) {
var value = "";
for (var i = e.resultIndex; i < e.results.length; ++i) {
if (e.results[i].isFinal) {
value += e.results[i][0].transcript;
}
}
if ( value != "") {
document.dispatchEvent(new CustomEvent("GET_TEXT", {detail: value}));
}
}
recognition.start();
"""))
result = streamlit_bokeh_events(
stt_button,
events="GET_TEXT",
key="listen",
refresh_on_update=False,
override_height=75,
debounce_time=0)
if result:
if "GET_TEXT" in result:
user_input = result.get("GET_TEXT")
st.write('Audio Input: ', user_input)
openai.api_key = os.getenv("OPENAI_API_KEY")
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=[
{"role": "user", "content": user_input}],
functions=[
{
"name": "send_email",
"description": "Sends an email to a person",
"parameters": {
"type": "object",
"properties": {
"email": {
"type": "string",
"description": "A person to send the email",
},
"body": {"type": "string"},
"subject": {"type": "string"},
},
},
}
],
function_call="auto",
)
message = response["choices"][0]["message"]
st.write('GPT: ', message)
if message.get("function_call"):
function_name = message["function_call"]["name"]
print('function_name: ', function_name)
if(function_name == 'send_email'):
# Access the arguments
arguments = json.loads(message['function_call']['arguments'])
email_arg = arguments['email']
body_arg = arguments['body']
subject_arg = arguments['subject']
# Step 3, call the function
function_response = send_email(
email_arg, subject_arg, body_arg
)
print(function_response) | 0 | 749 | [{"tag": "EMAIL", "value": "daniel@judini.ai", "start": 587, "end": 603}, {"tag": "EMAIL", "value": "dan.avila7@gmail.com", "start": 1330, "end": 1350}] | true | 2 | import streamlit as st
from bokeh.models.widgets import Button
from bokeh.models import CustomJS
from streamlit_bokeh_events import streamlit_bokeh_events
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
import openai
import json
import os
from dotenv import load_dotenv
load_dotenv()
def send_email(email, subject, body):
"""send the user an email with the answer"""
try:
if(subject == ''):
subject = 'GPT Email'
message = Mail(
# add the email connected to your sendgrid code here
from_email='efpyi@example.com',
to_emails=email,
subject=subject,
html_content=body
)
st.write(message)
sg = SendGridAPIClient(os.getenv("SENDGRID_API_KEY"))
response = sg.send(message)
st.write(response.status_code)
st.write(response.body)
st.write(response.headers)
except Exception as e:
print(f"An error occurred: {str(e)}")
st.title('GPT Sends Emails')
st.write('Instructions:')
st.write("Click on the 'Start Talking' button and allow the browser permission to use the microphone. Say a sentence requesting to send an email with a message. You must say the person's full email address.")
st.write("Example: Send an email to lyhxr@example.com reminding him that he must study the OpenAI Functions API for tomorrow's exam")
user_secret = st.text_input(label = ":blue[OpenAI API key]",
value="",
placeholder = "Paste your openAI API key, sk-",
type = "password")
if(user_secret):
stt_button = Button(label="Start talking", button_type="success")
stt_button.js_on_event("button_click", CustomJS(code="""
var recognition = new webkitSpeechRecognition();
recognition.continuous = true;
recognition.interimResults = true;
recognition.onresult = function (e) {
var value = "";
for (var i = e.resultIndex; i < e.results.length; ++i) {
if (e.results[i].isFinal) {
value += e.results[i][0].transcript;
}
}
if ( value != "") {
document.dispatchEvent(new CustomEvent("GET_TEXT", {detail: value}));
}
}
recognition.start();
"""))
result = streamlit_bokeh_events(
stt_button,
events="GET_TEXT",
key="listen",
refresh_on_update=False,
override_height=75,
debounce_time=0)
if result:
if "GET_TEXT" in result:
user_input = result.get("GET_TEXT")
st.write('Audio Input: ', user_input)
openai.api_key = os.getenv("OPENAI_API_KEY")
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=[
{"role": "user", "content": user_input}],
functions=[
{
"name": "send_email",
"description": "Sends an email to a person",
"parameters": {
"type": "object",
"properties": {
"email": {
"type": "string",
"description": "A person to send the email",
},
"body": {"type": "string"},
"subject": {"type": "string"},
},
},
}
],
function_call="auto",
)
message = response["choices"][0]["message"]
st.write('GPT: ', message)
if message.get("function_call"):
function_name = message["function_call"]["name"]
print('function_name: ', function_name)
if(function_name == 'send_email'):
# Access the arguments
arguments = json.loads(message['function_call']['arguments'])
email_arg = arguments['email']
body_arg = arguments['body']
subject_arg = arguments['subject']
# Step 3, call the function
function_response = send_email(
email_arg, subject_arg, body_arg
)
print(function_response) | true | import streamlit as st
from bokeh.models.widgets import Button
from bokeh.models import CustomJS
from streamlit_bokeh_events import streamlit_bokeh_events
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
import openai
import json
import os
from dotenv import load_dotenv
load_dotenv()
def send_email(email, subject, body):
"""send the user an email with the answer"""
try:
if(subject == ''):
subject = 'GPT Email'
message = Mail(
# add the email connected to your sendgrid code here
from_email='PI:EMAIL:efpyi@example.comEND_PI',
to_emails=email,
subject=subject,
html_content=body
)
st.write(message)
sg = SendGridAPIClient(os.getenv("SENDGRID_API_KEY"))
response = sg.send(message)
st.write(response.status_code)
st.write(response.body)
st.write(response.headers)
except Exception as e:
print(f"An error occurred: {str(e)}")
st.title('GPT Sends Emails')
st.write('Instructions:')
st.write("Click on the 'Start Talking' button and allow the browser permission to use the microphone. Say a sentence requesting to send an email with a message. You must say the person's full email address.")
st.write("Example: Send an email to PI:EMAIL:lyhxr@example.comEND_PI reminding him that he must study the OpenAI Functions API for tomorrow's exam")
user_secret = st.text_input(label = ":blue[OpenAI API key]",
value="",
placeholder = "Paste your openAI API key, sk-",
type = "password")
if(user_secret):
stt_button = Button(label="Start talking", button_type="success")
stt_button.js_on_event("button_click", CustomJS(code="""
var recognition = new webkitSpeechRecognition();
recognition.continuous = true;
recognition.interimResults = true;
recognition.onresult = function (e) {
var value = "";
for (var i = e.resultIndex; i < e.results.length; ++i) {
if (e.results[i].isFinal) {
value += e.results[i][0].transcript;
}
}
if ( value != "") {
document.dispatchEvent(new CustomEvent("GET_TEXT", {detail: value}));
}
}
recognition.start();
"""))
result = streamlit_bokeh_events(
stt_button,
events="GET_TEXT",
key="listen",
refresh_on_update=False,
override_height=75,
debounce_time=0)
if result:
if "GET_TEXT" in result:
user_input = result.get("GET_TEXT")
st.write('Audio Input: ', user_input)
openai.api_key = os.getenv("OPENAI_API_KEY")
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=[
{"role": "user", "content": user_input}],
functions=[
{
"name": "send_email",
"description": "Sends an email to a person",
"parameters": {
"type": "object",
"properties": {
"email": {
"type": "string",
"description": "A person to send the email",
},
"body": {"type": "string"},
"subject": {"type": "string"},
},
},
}
],
function_call="auto",
)
message = response["choices"][0]["message"]
st.write('GPT: ', message)
if message.get("function_call"):
function_name = message["function_call"]["name"]
print('function_name: ', function_name)
if(function_name == 'send_email'):
# Access the arguments
arguments = json.loads(message['function_call']['arguments'])
email_arg = arguments['email']
body_arg = arguments['body']
subject_arg = arguments['subject']
# Step 3, call the function
function_response = send_email(
email_arg, subject_arg, body_arg
)
print(function_response) |
hf_public_repos/zilliztech/GPTCache/tests/unit_tests | hf_public_repos/zilliztech/GPTCache/tests/unit_tests/adapter/test_openai.py | import asyncio
import base64
import os
import random
from io import BytesIO
from unittest.mock import AsyncMock, patch
from urllib.request import urlopen
import pytest
from gptcache import Cache, cache
from gptcache.adapter import openai
from gptcache.adapter.api import init_similar_cache
from gptcache.config import Config
from gptcache.manager import get_data_manager
from gptcache.processor.pre import (
get_file_bytes,
get_file_name,
get_openai_moderation_input,
get_prompt,
last_content,
)
from gptcache.utils.error import CacheError
from gptcache.utils.response import (
get_audio_text_from_openai_answer,
get_image_from_openai_b64,
get_image_from_openai_url,
get_image_from_path,
get_message_from_openai_answer,
get_stream_message_from_openai_answer,
get_text_from_openai_answer,
)
try:
from PIL import Image
except ModuleNotFoundError:
from gptcache.utils.dependency_control import prompt_install
prompt_install("pillow")
from PIL import Image
@pytest.mark.parametrize("enable_token_counter", (True, False))
def test_normal_openai(enable_token_counter):
cache.init(config=Config(enable_token_counter=enable_token_counter))
question = "calculate 1+3"
expect_answer = "the result is 4"
with patch("openai.ChatCompletion.create") as mock_create:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_create.return_value = datas
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
assert get_message_from_openai_answer(response) == expect_answer, response
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
answer_text = get_message_from_openai_answer(response)
assert answer_text == expect_answer, answer_text
@pytest.mark.asyncio
@pytest.mark.parametrize("enable_token_counter", (True, False))
async def test_normal_openai_async(enable_token_counter):
cache.init(config=Config(enable_token_counter=enable_token_counter))
question = "calculate 1+3"
expect_answer = "the result is 4"
import openai as real_openai
with patch.object(
real_openai.ChatCompletion, "acreate", new_callable=AsyncMock
) as mock_acreate:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_acreate.return_value = datas
response = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
assert get_message_from_openai_answer(response) == expect_answer, response
response = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
answer_text = get_message_from_openai_answer(response)
assert answer_text == expect_answer, answer_text
def test_stream_openai():
cache.init()
question = "calculate 1+1"
expect_answer = "the result is 2"
with patch("openai.ChatCompletion.create") as mock_create:
datas = [
{
"choices": [
{"delta": {"role": "assistant"}, "finish_reason": None, "index": 0}
],
"created": 1677825464,
"id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
},
{
"choices": [
{
"delta": {"content": "the result"},
"finish_reason": None,
"index": 0,
}
],
"created": 1677825464,
"id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
},
{
"choices": [
{"delta": {"content": " is 2"}, "finish_reason": None, "index": 0}
],
"created": 1677825464,
"id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
},
{
"choices": [{"delta": {}, "finish_reason": "stop", "index": 0}],
"created": 1677825464,
"id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
},
]
mock_create.return_value = iter(datas)
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
stream=True,
)
all_text = ""
for res in response:
all_text += get_stream_message_from_openai_answer(res)
assert all_text == expect_answer, all_text
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
answer_text = get_message_from_openai_answer(response)
assert answer_text == expect_answer, answer_text
@pytest.mark.asyncio
async def test_stream_openai_async():
cache.init()
question = "calculate 1+4"
expect_answer = "the result is 5"
import openai as real_openai
with patch.object(
real_openai.ChatCompletion, "acreate", new_callable=AsyncMock
) as mock_acreate:
datas = [
{
"choices": [
{"delta": {"role": "assistant"}, "finish_reason": None, "index": 0}
],
"created": 1677825464,
"id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
},
{
"choices": [
{
"delta": {"content": "the result"},
"finish_reason": None,
"index": 0,
}
],
"created": 1677825464,
"id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
},
{
"choices": [
{"delta": {"content": " is 5"}, "finish_reason": None, "index": 0}
],
"created": 1677825464,
"id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
},
{
"choices": [{"delta": {}, "finish_reason": "stop", "index": 0}],
"created": 1677825464,
"id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
},
]
async def acreate(*args, **kwargs):
for item in datas:
yield item
await asyncio.sleep(0)
mock_acreate.return_value = acreate()
response = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
stream=True,
)
all_text = ""
async for res in response:
all_text += get_stream_message_from_openai_answer(res)
assert all_text == expect_answer, all_text
response = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
stream=True,
)
answer_text = ""
async for res in response:
answer_text += get_stream_message_from_openai_answer(res)
assert answer_text == expect_answer, answer_text
def test_completion():
cache.init(pre_embedding_func=get_prompt)
question = "what is your name?"
expect_answer = "gptcache"
with patch("openai.Completion.create") as mock_create:
mock_create.return_value = {
"choices": [{"text": expect_answer, "finish_reason": None, "index": 0}],
"created": 1677825464,
"id": "cmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "text-davinci-003",
"object": "text_completion",
}
response = openai.Completion.create(model="text-davinci-003", prompt=question)
answer_text = get_text_from_openai_answer(response)
assert answer_text == expect_answer
response = openai.Completion.create(model="text-davinci-003", prompt=question)
answer_text = get_text_from_openai_answer(response)
assert answer_text == expect_answer
@pytest.mark.asyncio
async def test_completion_async():
cache.init(pre_embedding_func=get_prompt)
question = "what is your name?"
expect_answer = "gptcache"
with patch("openai.Completion.acreate", new_callable=AsyncMock) as mock_acreate:
mock_acreate.return_value = {
"choices": [{"text": expect_answer, "finish_reason": None, "index": 0}],
"created": 1677825464,
"id": "cmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "text-davinci-003",
"object": "text_completion",
}
response = await openai.Completion.acreate(
model="text-davinci-003", prompt=question
)
answer_text = get_text_from_openai_answer(response)
assert answer_text == expect_answer
response = await openai.Completion.acreate(
model="text-davinci-003", prompt=question
)
answer_text = get_text_from_openai_answer(response)
assert answer_text == expect_answer
@pytest.mark.asyncio
async def test_completion_error_wrapping():
cache.init(pre_embedding_func=get_prompt)
import openai as real_openai
with patch("openai.Completion.acreate", new_callable=AsyncMock) as mock_acreate:
mock_acreate.side_effect = real_openai.OpenAIError
with pytest.raises(real_openai.OpenAIError) as e:
await openai.Completion.acreate(model="text-davinci-003", prompt="boom")
assert isinstance(e.value, CacheError)
with patch("openai.Completion.create") as mock_create:
mock_create.side_effect = real_openai.OpenAIError
with pytest.raises(real_openai.OpenAIError) as e:
openai.Completion.create(model="text-davinci-003", prompt="boom")
assert isinstance(e.value, CacheError)
def test_image_create():
cache.init(pre_embedding_func=get_prompt)
prompt1 = "test url" # bytes
test_url = (
"https://raw.githubusercontent.com/zilliztech/GPTCache/dev/docs/GPTCache.png"
)
test_response = {"created": 1677825464, "data": [{"url": test_url}]}
prompt2 = "test base64"
img_bytes = base64.b64decode(get_image_from_openai_url(test_response))
img_file = BytesIO(img_bytes) # convert image to file-like object
img = Image.open(img_file)
img = img.resize((256, 256))
buffered = BytesIO()
img.save(buffered, format="JPEG")
expected_img_data = base64.b64encode(buffered.getvalue()).decode("ascii")
###### Return base64 ######
with patch("openai.Image.create") as mock_create_b64:
mock_create_b64.return_value = {
"created": 1677825464,
"data": [{"b64_json": expected_img_data}],
}
response = openai.Image.create(
prompt=prompt1, size="256x256", response_format="b64_json"
)
img_returned = get_image_from_openai_b64(response)
assert img_returned == expected_img_data
response = openai.Image.create(
prompt=prompt1, size="256x256", response_format="b64_json"
)
img_returned = get_image_from_openai_b64(response)
assert img_returned == expected_img_data
###### Return url ######
with patch("openai.Image.create") as mock_create_url:
mock_create_url.return_value = {
"created": 1677825464,
"data": [{"url": test_url}],
}
response = openai.Image.create(
prompt=prompt2, size="256x256", response_format="url"
)
answer_url = response["data"][0]["url"]
assert test_url == answer_url
response = openai.Image.create(
prompt=prompt2, size="256x256", response_format="url"
)
img_returned = get_image_from_path(response).decode("ascii")
assert img_returned == expected_img_data
os.remove(response["data"][0]["url"])
def test_audio_transcribe():
cache.init(pre_embedding_func=get_file_name)
url = "https://github.com/towhee-io/examples/releases/download/data/blues.00000.mp3"
audio_file = urlopen(url)
audio_file.name = url
expect_answer = (
"One bourbon, one scotch and one bill Hey Mr. Bartender, come here I want another drink and I want it now My baby she gone, "
"she been gone tonight I ain't seen my baby since night of her life One bourbon, one scotch and one bill"
)
with patch("openai.Audio.transcribe") as mock_create:
mock_create.return_value = {"text": expect_answer}
response = openai.Audio.transcribe(model="whisper-1", file=audio_file)
answer_text = get_audio_text_from_openai_answer(response)
assert answer_text == expect_answer
response = openai.Audio.transcribe(model="whisper-1", file=audio_file)
answer_text = get_audio_text_from_openai_answer(response)
assert answer_text == expect_answer
def test_audio_translate():
cache.init(
pre_embedding_func=get_file_bytes,
data_manager=get_data_manager(data_path="data_map1.txt"),
)
url = "https://github.com/towhee-io/examples/releases/download/data/blues.00000.mp3"
audio_file = urlopen(url)
audio_file.name = url
expect_answer = (
"One bourbon, one scotch and one bill Hey Mr. Bartender, come here I want another drink and I want it now My baby she gone, "
"she been gone tonight I ain't seen my baby since night of her life One bourbon, one scotch and one bill"
)
with patch("openai.Audio.translate") as mock_create:
mock_create.return_value = {"text": expect_answer}
response = openai.Audio.translate(model="whisper-1", file=audio_file)
answer_text = get_audio_text_from_openai_answer(response)
assert answer_text == expect_answer
audio_file.name = "download/data/blues.00000.mp3"
response = openai.Audio.translate(model="whisper-1", file=audio_file)
answer_text = get_audio_text_from_openai_answer(response)
assert answer_text == expect_answer
def test_moderation():
init_similar_cache(
data_dir=str(random.random()), pre_func=get_openai_moderation_input
)
expect_violence = 0.8864422
with patch("openai.Moderation.create") as mock_create:
mock_create.return_value = {
"id": "modr-7IxkwrKvfnNJJIBsXAc0mfcpGaQJF",
"model": "text-moderation-004",
"results": [
{
"categories": {
"hate": False,
"hate/threatening": False,
"self-harm": False,
"sexual": False,
"sexual/minors": False,
"violence": True,
"violence/graphic": False,
},
"category_scores": {
"hate": 0.18067425,
"hate/threatening": 0.0032884814,
"self-harm": 1.8089558e-09,
"sexual": 9.759996e-07,
"sexual/minors": 1.3364182e-08,
"violence": 0.8864422,
"violence/graphic": 3.2011528e-08,
},
"flagged": True,
}
],
}
response = openai.Moderation.create(
input=["I want to kill them."],
)
assert (
response.get("results")[0].get("category_scores").get("violence")
== expect_violence
)
response = openai.Moderation.create(
input="I want to kill them.",
)
assert (
response.get("results")[0].get("category_scores").get("violence")
== expect_violence
)
expect_violence = 0.88708615
with patch("openai.Moderation.create") as mock_create:
mock_create.return_value = {
"id": "modr-7Ixe5Bvq4wqzZb1xtOxGxewg0G87F",
"model": "text-moderation-004",
"results": [
{
"flagged": False,
"categories": {
"sexual": False,
"hate": False,
"violence": False,
"self-harm": False,
"sexual/minors": False,
"hate/threatening": False,
"violence/graphic": False,
},
"category_scores": {
"sexual": 1.5214279e-06,
"hate": 2.0188916e-06,
"violence": 1.8034231e-09,
"self-harm": 1.0547879e-10,
"sexual/minors": 2.6696927e-09,
"hate/threatening": 8.445262e-12,
"violence/graphic": 5.324232e-10,
},
},
{
"flagged": True,
"categories": {
"sexual": False,
"hate": False,
"violence": True,
"self-harm": False,
"sexual/minors": False,
"hate/threatening": False,
"violence/graphic": False,
},
"category_scores": {
"sexual": 9.5307604e-07,
"hate": 0.18386655,
"violence": 0.88708615,
"self-harm": 1.7594172e-09,
"sexual/minors": 1.3112497e-08,
"hate/threatening": 0.0032587533,
"violence/graphic": 3.1731048e-08,
},
},
],
}
response = openai.Moderation.create(
input=["hello, world", "I want to kill them."],
)
assert not response.get("results")[0].get("flagged")
assert (
response.get("results")[1].get("category_scores").get("violence")
== expect_violence
)
response = openai.Moderation.create(
input=["hello, world", "I want to kill them."],
)
assert not response.get("results")[0].get("flagged")
assert (
response.get("results")[1].get("category_scores").get("violence")
== expect_violence
)
def test_base_llm_cache():
cache_obj = Cache()
init_similar_cache(
data_dir=str(random.random()), pre_func=last_content, cache_obj=cache_obj
)
question = "What's Github"
expect_answer = "Github is a great place to start"
with patch("openai.ChatCompletion.create") as mock_create:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_create.return_value = datas
import openai as real_openai
def proxy_openai_chat_complete_exception(*args, **kwargs):
raise real_openai.error.APIConnectionError("connect fail")
openai.ChatCompletion.llm = proxy_openai_chat_complete_exception
is_openai_exception = False
try:
openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
cache_obj=cache_obj,
)
except real_openai.error.APIConnectionError:
is_openai_exception = True
assert is_openai_exception
is_proxy = False
def proxy_openai_chat_complete(*args, **kwargs):
nonlocal is_proxy
is_proxy = True
return real_openai.ChatCompletion.create(*args, **kwargs)
openai.ChatCompletion.llm = proxy_openai_chat_complete
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
cache_obj=cache_obj,
)
assert is_proxy
assert get_message_from_openai_answer(response) == expect_answer, response
is_exception = False
try:
resp = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
except Exception:
is_exception = True
assert is_exception
openai.ChatCompletion.cache_args = {"cache_obj": cache_obj}
print(openai.ChatCompletion.fill_base_args(foo="hello"))
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
openai.ChatCompletion.llm = None
openai.ChatCompletion.cache_args = {}
assert get_message_from_openai_answer(response) == expect_answer, response
@pytest.mark.asyncio
async def test_base_llm_cache_async():
cache_obj = Cache()
init_similar_cache(
data_dir=str(random.random()), pre_func=last_content, cache_obj=cache_obj
)
question = "What's Github"
expect_answer = "Github is a great place to start"
import openai as real_openai
with patch.object(
real_openai.ChatCompletion, "acreate", new_callable=AsyncMock
) as mock_acreate:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_acreate.return_value = datas
async def proxy_openai_chat_complete_exception(*args, **kwargs):
raise real_openai.error.APIConnectionError("connect fail")
openai.ChatCompletion.llm = proxy_openai_chat_complete_exception
is_openai_exception = False
try:
await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
cache_obj=cache_obj,
)
except real_openai.error.APIConnectionError:
is_openai_exception = True
assert is_openai_exception
is_proxy = False
def proxy_openai_chat_complete(*args, **kwargs):
nonlocal is_proxy
is_proxy = True
return real_openai.ChatCompletion.acreate(*args, **kwargs)
openai.ChatCompletion.llm = proxy_openai_chat_complete
response = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
cache_obj=cache_obj,
)
assert is_proxy
assert get_message_from_openai_answer(response) == expect_answer, response
is_exception = False
try:
resp = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
except Exception:
is_exception = True
assert is_exception
openai.ChatCompletion.cache_args = {"cache_obj": cache_obj}
print(openai.ChatCompletion.fill_base_args(foo="hello"))
response = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
openai.ChatCompletion.llm = None
openai.ChatCompletion.cache_args = {}
assert get_message_from_openai_answer(response) == expect_answer, response
# def test_audio_api():
# data2vec = Data2VecAudio()
# data_manager = manager_factory("sqlite,faiss,local", "audio_api", vector_params={"dimension": data2vec.dimension})
# cache.init(
# pre_embedding_func=get_prompt,
# embedding_func=data2vec.to_embeddings,
# data_manager=data_manager,
# similarity_evaluation=SearchDistanceEvaluation(),
# )
# # url = "https://github.com/towhee-io/examples/releases/download/data/blues.00000.mp3"
# url = "https://github.com/towhee-io/examples/releases/download/data/ah_yes.wav"
# expect_answer = (
# "One bourbon, one scotch and one bill Hey Mr. Bartender, come here I want another drink and I want it now My baby she gone, "
# "she been gone tonight I ain't seen my baby since night of her life One bourbon, one scotch and one bill"
# )
# put(prompt=url, data=expect_answer)
#
# assert get(prompt=url) == expect_answer
| 0 | 856 | [{"tag": "KEY", "value": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 1643, "end": 1681}, {"tag": "KEY", "value": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 3269, "end": 3307}, {"tag": "KEY", "value": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 4576, "end": 4614}, {"tag": "KEY", "value": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 5041, "end": 5079}, {"tag": "KEY", "value": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 5406, "end": 5444}, {"tag": "KEY", "value": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 5717, "end": 5755}, {"tag": "KEY", "value": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 7268, "end": 7306}, {"tag": "KEY", "value": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 7733, "end": 7771}, {"tag": "KEY", "value": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 8098, "end": 8136}, {"tag": "KEY", "value": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 8409, "end": 8447}, {"tag": "KEY", "value": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 21421, "end": 21459}, {"tag": "KEY", "value": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 24671, "end": 24709}, {"tag": "KEY", "value": "modr-7IxkwrKvfnNJJIBsXAc0mfcpGaQJF", "start": 16715, "end": 16749}, {"tag": "KEY", "value": "cmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 1647, "end": 1681}, {"tag": "KEY", "value": "cmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 3273, "end": 3307}, {"tag": "KEY", "value": "cmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 4580, "end": 4614}, {"tag": "KEY", "value": "cmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 5045, "end": 5079}, {"tag": "KEY", "value": "cmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 5410, "end": 5444}, {"tag": "KEY", "value": "cmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 5721, "end": 5755}, {"tag": "KEY", "value": "cmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 7272, "end": 7306}, {"tag": "KEY", "value": "cmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 7737, "end": 7771}, {"tag": "KEY", "value": "cmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 8102, "end": 8136}, {"tag": "KEY", "value": "cmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 8413, "end": 8447}, {"tag": "KEY", "value": "cmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 10048, "end": 10082}, {"tag": "KEY", "value": "cmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 10983, "end": 11017}, {"tag": "KEY", "value": "cmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 21425, "end": 21459}, {"tag": "KEY", "value": "cmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 24675, "end": 24709}, {"tag": "KEY", "value": "modr-7Ixe5Bvq4wqzZb1xtOxGxewg0G87F", "start": 18312, "end": 18346}] | true | 28 | import asyncio
import base64
import os
import random
from io import BytesIO
from unittest.mock import AsyncMock, patch
from urllib.request import urlopen
import pytest
from gptcache import Cache, cache
from gptcache.adapter import openai
from gptcache.adapter.api import init_similar_cache
from gptcache.config import Config
from gptcache.manager import get_data_manager
from gptcache.processor.pre import (
get_file_bytes,
get_file_name,
get_openai_moderation_input,
get_prompt,
last_content,
)
from gptcache.utils.error import CacheError
from gptcache.utils.response import (
get_audio_text_from_openai_answer,
get_image_from_openai_b64,
get_image_from_openai_url,
get_image_from_path,
get_message_from_openai_answer,
get_stream_message_from_openai_answer,
get_text_from_openai_answer,
)
try:
from PIL import Image
except ModuleNotFoundError:
from gptcache.utils.dependency_control import prompt_install
prompt_install("pillow")
from PIL import Image
@pytest.mark.parametrize("enable_token_counter", (True, False))
def test_normal_openai(enable_token_counter):
cache.init(config=Config(enable_token_counter=enable_token_counter))
question = "calculate 1+3"
expect_answer = "the result is 4"
with patch("openai.ChatCompletion.create") as mock_create:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "9q3vfhm7l33rus21toc8fndupq76itje 9q3vfhm7l33rus21toc8fndupq76itje",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_create.return_value = datas
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
assert get_message_from_openai_answer(response) == expect_answer, response
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
answer_text = get_message_from_openai_answer(response)
assert answer_text == expect_answer, answer_text
@pytest.mark.asyncio
@pytest.mark.parametrize("enable_token_counter", (True, False))
async def test_normal_openai_async(enable_token_counter):
cache.init(config=Config(enable_token_counter=enable_token_counter))
question = "calculate 1+3"
expect_answer = "the result is 4"
import openai as real_openai
with patch.object(
real_openai.ChatCompletion, "acreate", new_callable=AsyncMock
) as mock_acreate:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "9q3vfhm7l33rus21toc8fndupq76itje 9q3vfhm7l33rus21toc8fndupq76itje",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_acreate.return_value = datas
response = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
assert get_message_from_openai_answer(response) == expect_answer, response
response = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
answer_text = get_message_from_openai_answer(response)
assert answer_text == expect_answer, answer_text
def test_stream_openai():
cache.init()
question = "calculate 1+1"
expect_answer = "the result is 2"
with patch("openai.ChatCompletion.create") as mock_create:
datas = [
{
"choices": [
{"delta": {"role": "assistant"}, "finish_reason": None, "index": 0}
],
"created": 1677825464,
"id": "9q3vfhm7l33rus21toc8fndupq76itje 9q3vfhm7l33rus21toc8fndupq76itje",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
},
{
"choices": [
{
"delta": {"content": "the result"},
"finish_reason": None,
"index": 0,
}
],
"created": 1677825464,
"id": "9q3vfhm7l33rus21toc8fndupq76itje 9q3vfhm7l33rus21toc8fndupq76itje",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
},
{
"choices": [
{"delta": {"content": " is 2"}, "finish_reason": None, "index": 0}
],
"created": 1677825464,
"id": "9q3vfhm7l33rus21toc8fndupq76itje 9q3vfhm7l33rus21toc8fndupq76itje",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
},
{
"choices": [{"delta": {}, "finish_reason": "stop", "index": 0}],
"created": 1677825464,
"id": "9q3vfhm7l33rus21toc8fndupq76itje 9q3vfhm7l33rus21toc8fndupq76itje",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
},
]
mock_create.return_value = iter(datas)
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
stream=True,
)
all_text = ""
for res in response:
all_text += get_stream_message_from_openai_answer(res)
assert all_text == expect_answer, all_text
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
answer_text = get_message_from_openai_answer(response)
assert answer_text == expect_answer, answer_text
@pytest.mark.asyncio
async def test_stream_openai_async():
cache.init()
question = "calculate 1+4"
expect_answer = "the result is 5"
import openai as real_openai
with patch.object(
real_openai.ChatCompletion, "acreate", new_callable=AsyncMock
) as mock_acreate:
datas = [
{
"choices": [
{"delta": {"role": "assistant"}, "finish_reason": None, "index": 0}
],
"created": 1677825464,
"id": "9q3vfhm7l33rus21toc8fndupq76itje 9q3vfhm7l33rus21toc8fndupq76itje",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
},
{
"choices": [
{
"delta": {"content": "the result"},
"finish_reason": None,
"index": 0,
}
],
"created": 1677825464,
"id": "9q3vfhm7l33rus21toc8fndupq76itje 9q3vfhm7l33rus21toc8fndupq76itje",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
},
{
"choices": [
{"delta": {"content": " is 5"}, "finish_reason": None, "index": 0}
],
"created": 1677825464,
"id": "9q3vfhm7l33rus21toc8fndupq76itje 9q3vfhm7l33rus21toc8fndupq76itje",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
},
{
"choices": [{"delta": {}, "finish_reason": "stop", "index": 0}],
"created": 1677825464,
"id": "9q3vfhm7l33rus21toc8fndupq76itje 9q3vfhm7l33rus21toc8fndupq76itje",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
},
]
async def acreate(*args, **kwargs):
for item in datas:
yield item
await asyncio.sleep(0)
mock_acreate.return_value = acreate()
response = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
stream=True,
)
all_text = ""
async for res in response:
all_text += get_stream_message_from_openai_answer(res)
assert all_text == expect_answer, all_text
response = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
stream=True,
)
answer_text = ""
async for res in response:
answer_text += get_stream_message_from_openai_answer(res)
assert answer_text == expect_answer, answer_text
def test_completion():
cache.init(pre_embedding_func=get_prompt)
question = "what is your name?"
expect_answer = "gptcache"
with patch("openai.Completion.create") as mock_create:
mock_create.return_value = {
"choices": [{"text": expect_answer, "finish_reason": None, "index": 0}],
"created": 1677825464,
"id": "9q3vfhm7l33rus21toc8fndupq76itje",
"model": "text-davinci-003",
"object": "text_completion",
}
response = openai.Completion.create(model="text-davinci-003", prompt=question)
answer_text = get_text_from_openai_answer(response)
assert answer_text == expect_answer
response = openai.Completion.create(model="text-davinci-003", prompt=question)
answer_text = get_text_from_openai_answer(response)
assert answer_text == expect_answer
@pytest.mark.asyncio
async def test_completion_async():
cache.init(pre_embedding_func=get_prompt)
question = "what is your name?"
expect_answer = "gptcache"
with patch("openai.Completion.acreate", new_callable=AsyncMock) as mock_acreate:
mock_acreate.return_value = {
"choices": [{"text": expect_answer, "finish_reason": None, "index": 0}],
"created": 1677825464,
"id": "9q3vfhm7l33rus21toc8fndupq76itje",
"model": "text-davinci-003",
"object": "text_completion",
}
response = await openai.Completion.acreate(
model="text-davinci-003", prompt=question
)
answer_text = get_text_from_openai_answer(response)
assert answer_text == expect_answer
response = await openai.Completion.acreate(
model="text-davinci-003", prompt=question
)
answer_text = get_text_from_openai_answer(response)
assert answer_text == expect_answer
@pytest.mark.asyncio
async def test_completion_error_wrapping():
cache.init(pre_embedding_func=get_prompt)
import openai as real_openai
with patch("openai.Completion.acreate", new_callable=AsyncMock) as mock_acreate:
mock_acreate.side_effect = real_openai.OpenAIError
with pytest.raises(real_openai.OpenAIError) as e:
await openai.Completion.acreate(model="text-davinci-003", prompt="boom")
assert isinstance(e.value, CacheError)
with patch("openai.Completion.create") as mock_create:
mock_create.side_effect = real_openai.OpenAIError
with pytest.raises(real_openai.OpenAIError) as e:
openai.Completion.create(model="text-davinci-003", prompt="boom")
assert isinstance(e.value, CacheError)
def test_image_create():
cache.init(pre_embedding_func=get_prompt)
prompt1 = "test url" # bytes
test_url = (
"https://raw.githubusercontent.com/zilliztech/GPTCache/dev/docs/GPTCache.png"
)
test_response = {"created": 1677825464, "data": [{"url": test_url}]}
prompt2 = "test base64"
img_bytes = base64.b64decode(get_image_from_openai_url(test_response))
img_file = BytesIO(img_bytes) # convert image to file-like object
img = Image.open(img_file)
img = img.resize((256, 256))
buffered = BytesIO()
img.save(buffered, format="JPEG")
expected_img_data = base64.b64encode(buffered.getvalue()).decode("ascii")
###### Return base64 ######
with patch("openai.Image.create") as mock_create_b64:
mock_create_b64.return_value = {
"created": 1677825464,
"data": [{"b64_json": expected_img_data}],
}
response = openai.Image.create(
prompt=prompt1, size="256x256", response_format="b64_json"
)
img_returned = get_image_from_openai_b64(response)
assert img_returned == expected_img_data
response = openai.Image.create(
prompt=prompt1, size="256x256", response_format="b64_json"
)
img_returned = get_image_from_openai_b64(response)
assert img_returned == expected_img_data
###### Return url ######
with patch("openai.Image.create") as mock_create_url:
mock_create_url.return_value = {
"created": 1677825464,
"data": [{"url": test_url}],
}
response = openai.Image.create(
prompt=prompt2, size="256x256", response_format="url"
)
answer_url = response["data"][0]["url"]
assert test_url == answer_url
response = openai.Image.create(
prompt=prompt2, size="256x256", response_format="url"
)
img_returned = get_image_from_path(response).decode("ascii")
assert img_returned == expected_img_data
os.remove(response["data"][0]["url"])
def test_audio_transcribe():
cache.init(pre_embedding_func=get_file_name)
url = "https://github.com/towhee-io/examples/releases/download/data/blues.00000.mp3"
audio_file = urlopen(url)
audio_file.name = url
expect_answer = (
"One bourbon, one scotch and one bill Hey Mr. Bartender, come here I want another drink and I want it now My baby she gone, "
"she been gone tonight I ain't seen my baby since night of her life One bourbon, one scotch and one bill"
)
with patch("openai.Audio.transcribe") as mock_create:
mock_create.return_value = {"text": expect_answer}
response = openai.Audio.transcribe(model="whisper-1", file=audio_file)
answer_text = get_audio_text_from_openai_answer(response)
assert answer_text == expect_answer
response = openai.Audio.transcribe(model="whisper-1", file=audio_file)
answer_text = get_audio_text_from_openai_answer(response)
assert answer_text == expect_answer
def test_audio_translate():
cache.init(
pre_embedding_func=get_file_bytes,
data_manager=get_data_manager(data_path="data_map1.txt"),
)
url = "https://github.com/towhee-io/examples/releases/download/data/blues.00000.mp3"
audio_file = urlopen(url)
audio_file.name = url
expect_answer = (
"One bourbon, one scotch and one bill Hey Mr. Bartender, come here I want another drink and I want it now My baby she gone, "
"she been gone tonight I ain't seen my baby since night of her life One bourbon, one scotch and one bill"
)
with patch("openai.Audio.translate") as mock_create:
mock_create.return_value = {"text": expect_answer}
response = openai.Audio.translate(model="whisper-1", file=audio_file)
answer_text = get_audio_text_from_openai_answer(response)
assert answer_text == expect_answer
audio_file.name = "download/data/blues.00000.mp3"
response = openai.Audio.translate(model="whisper-1", file=audio_file)
answer_text = get_audio_text_from_openai_answer(response)
assert answer_text == expect_answer
def test_moderation():
init_similar_cache(
data_dir=str(random.random()), pre_func=get_openai_moderation_input
)
expect_violence = 0.8864422
with patch("openai.Moderation.create") as mock_create:
mock_create.return_value = {
"id": "ax5kh6jaqkcd2tiexxs8v6xjo8yv8a6b",
"model": "text-moderation-004",
"results": [
{
"categories": {
"hate": False,
"hate/threatening": False,
"self-harm": False,
"sexual": False,
"sexual/minors": False,
"violence": True,
"violence/graphic": False,
},
"category_scores": {
"hate": 0.18067425,
"hate/threatening": 0.0032884814,
"self-harm": 1.8089558e-09,
"sexual": 9.759996e-07,
"sexual/minors": 1.3364182e-08,
"violence": 0.8864422,
"violence/graphic": 3.2011528e-08,
},
"flagged": True,
}
],
}
response = openai.Moderation.create(
input=["I want to kill them."],
)
assert (
response.get("results")[0].get("category_scores").get("violence")
== expect_violence
)
response = openai.Moderation.create(
input="I want to kill them.",
)
assert (
response.get("results")[0].get("category_scores").get("violence")
== expect_violence
)
expect_violence = 0.88708615
with patch("openai.Moderation.create") as mock_create:
mock_create.return_value = {
"id": "se2xy1bknelxn4y8xzxu3trosptip3q5",
"model": "text-moderation-004",
"results": [
{
"flagged": False,
"categories": {
"sexual": False,
"hate": False,
"violence": False,
"self-harm": False,
"sexual/minors": False,
"hate/threatening": False,
"violence/graphic": False,
},
"category_scores": {
"sexual": 1.5214279e-06,
"hate": 2.0188916e-06,
"violence": 1.8034231e-09,
"self-harm": 1.0547879e-10,
"sexual/minors": 2.6696927e-09,
"hate/threatening": 8.445262e-12,
"violence/graphic": 5.324232e-10,
},
},
{
"flagged": True,
"categories": {
"sexual": False,
"hate": False,
"violence": True,
"self-harm": False,
"sexual/minors": False,
"hate/threatening": False,
"violence/graphic": False,
},
"category_scores": {
"sexual": 9.5307604e-07,
"hate": 0.18386655,
"violence": 0.88708615,
"self-harm": 1.7594172e-09,
"sexual/minors": 1.3112497e-08,
"hate/threatening": 0.0032587533,
"violence/graphic": 3.1731048e-08,
},
},
],
}
response = openai.Moderation.create(
input=["hello, world", "I want to kill them."],
)
assert not response.get("results")[0].get("flagged")
assert (
response.get("results")[1].get("category_scores").get("violence")
== expect_violence
)
response = openai.Moderation.create(
input=["hello, world", "I want to kill them."],
)
assert not response.get("results")[0].get("flagged")
assert (
response.get("results")[1].get("category_scores").get("violence")
== expect_violence
)
def test_base_llm_cache():
cache_obj = Cache()
init_similar_cache(
data_dir=str(random.random()), pre_func=last_content, cache_obj=cache_obj
)
question = "What's Github"
expect_answer = "Github is a great place to start"
with patch("openai.ChatCompletion.create") as mock_create:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "9q3vfhm7l33rus21toc8fndupq76itje 9q3vfhm7l33rus21toc8fndupq76itje",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_create.return_value = datas
import openai as real_openai
def proxy_openai_chat_complete_exception(*args, **kwargs):
raise real_openai.error.APIConnectionError("connect fail")
openai.ChatCompletion.llm = proxy_openai_chat_complete_exception
is_openai_exception = False
try:
openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
cache_obj=cache_obj,
)
except real_openai.error.APIConnectionError:
is_openai_exception = True
assert is_openai_exception
is_proxy = False
def proxy_openai_chat_complete(*args, **kwargs):
nonlocal is_proxy
is_proxy = True
return real_openai.ChatCompletion.create(*args, **kwargs)
openai.ChatCompletion.llm = proxy_openai_chat_complete
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
cache_obj=cache_obj,
)
assert is_proxy
assert get_message_from_openai_answer(response) == expect_answer, response
is_exception = False
try:
resp = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
except Exception:
is_exception = True
assert is_exception
openai.ChatCompletion.cache_args = {"cache_obj": cache_obj}
print(openai.ChatCompletion.fill_base_args(foo="hello"))
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
openai.ChatCompletion.llm = None
openai.ChatCompletion.cache_args = {}
assert get_message_from_openai_answer(response) == expect_answer, response
@pytest.mark.asyncio
async def test_base_llm_cache_async():
cache_obj = Cache()
init_similar_cache(
data_dir=str(random.random()), pre_func=last_content, cache_obj=cache_obj
)
question = "What's Github"
expect_answer = "Github is a great place to start"
import openai as real_openai
with patch.object(
real_openai.ChatCompletion, "acreate", new_callable=AsyncMock
) as mock_acreate:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "9q3vfhm7l33rus21toc8fndupq76itje 9q3vfhm7l33rus21toc8fndupq76itje",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_acreate.return_value = datas
async def proxy_openai_chat_complete_exception(*args, **kwargs):
raise real_openai.error.APIConnectionError("connect fail")
openai.ChatCompletion.llm = proxy_openai_chat_complete_exception
is_openai_exception = False
try:
await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
cache_obj=cache_obj,
)
except real_openai.error.APIConnectionError:
is_openai_exception = True
assert is_openai_exception
is_proxy = False
def proxy_openai_chat_complete(*args, **kwargs):
nonlocal is_proxy
is_proxy = True
return real_openai.ChatCompletion.acreate(*args, **kwargs)
openai.ChatCompletion.llm = proxy_openai_chat_complete
response = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
cache_obj=cache_obj,
)
assert is_proxy
assert get_message_from_openai_answer(response) == expect_answer, response
is_exception = False
try:
resp = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
except Exception:
is_exception = True
assert is_exception
openai.ChatCompletion.cache_args = {"cache_obj": cache_obj}
print(openai.ChatCompletion.fill_base_args(foo="hello"))
response = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
openai.ChatCompletion.llm = None
openai.ChatCompletion.cache_args = {}
assert get_message_from_openai_answer(response) == expect_answer, response
# def test_audio_api():
# data2vec = Data2VecAudio()
# data_manager = manager_factory("sqlite,faiss,local", "audio_api", vector_params={"dimension": data2vec.dimension})
# cache.init(
# pre_embedding_func=get_prompt,
# embedding_func=data2vec.to_embeddings,
# data_manager=data_manager,
# similarity_evaluation=SearchDistanceEvaluation(),
# )
# # url = "https://github.com/towhee-io/examples/releases/download/data/blues.00000.mp3"
# url = "https://github.com/towhee-io/examples/releases/download/data/ah_yes.wav"
# expect_answer = (
# "One bourbon, one scotch and one bill Hey Mr. Bartender, come here I want another drink and I want it now My baby she gone, "
# "she been gone tonight I ain't seen my baby since night of her life One bourbon, one scotch and one bill"
# )
# put(prompt=url, data=expect_answer)
#
# assert get(prompt=url) == expect_answer
| true | import asyncio
import base64
import os
import random
from io import BytesIO
from unittest.mock import AsyncMock, patch
from urllib.request import urlopen
import pytest
from gptcache import Cache, cache
from gptcache.adapter import openai
from gptcache.adapter.api import init_similar_cache
from gptcache.config import Config
from gptcache.manager import get_data_manager
from gptcache.processor.pre import (
get_file_bytes,
get_file_name,
get_openai_moderation_input,
get_prompt,
last_content,
)
from gptcache.utils.error import CacheError
from gptcache.utils.response import (
get_audio_text_from_openai_answer,
get_image_from_openai_b64,
get_image_from_openai_url,
get_image_from_path,
get_message_from_openai_answer,
get_stream_message_from_openai_answer,
get_text_from_openai_answer,
)
try:
from PIL import Image
except ModuleNotFoundError:
from gptcache.utils.dependency_control import prompt_install
prompt_install("pillow")
from PIL import Image
@pytest.mark.parametrize("enable_token_counter", (True, False))
def test_normal_openai(enable_token_counter):
cache.init(config=Config(enable_token_counter=enable_token_counter))
question = "calculate 1+3"
expect_answer = "the result is 4"
with patch("openai.ChatCompletion.create") as mock_create:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_create.return_value = datas
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
assert get_message_from_openai_answer(response) == expect_answer, response
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
answer_text = get_message_from_openai_answer(response)
assert answer_text == expect_answer, answer_text
@pytest.mark.asyncio
@pytest.mark.parametrize("enable_token_counter", (True, False))
async def test_normal_openai_async(enable_token_counter):
cache.init(config=Config(enable_token_counter=enable_token_counter))
question = "calculate 1+3"
expect_answer = "the result is 4"
import openai as real_openai
with patch.object(
real_openai.ChatCompletion, "acreate", new_callable=AsyncMock
) as mock_acreate:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_acreate.return_value = datas
response = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
assert get_message_from_openai_answer(response) == expect_answer, response
response = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
answer_text = get_message_from_openai_answer(response)
assert answer_text == expect_answer, answer_text
def test_stream_openai():
cache.init()
question = "calculate 1+1"
expect_answer = "the result is 2"
with patch("openai.ChatCompletion.create") as mock_create:
datas = [
{
"choices": [
{"delta": {"role": "assistant"}, "finish_reason": None, "index": 0}
],
"created": 1677825464,
"id": "PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
},
{
"choices": [
{
"delta": {"content": "the result"},
"finish_reason": None,
"index": 0,
}
],
"created": 1677825464,
"id": "PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
},
{
"choices": [
{"delta": {"content": " is 2"}, "finish_reason": None, "index": 0}
],
"created": 1677825464,
"id": "PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
},
{
"choices": [{"delta": {}, "finish_reason": "stop", "index": 0}],
"created": 1677825464,
"id": "PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
},
]
mock_create.return_value = iter(datas)
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
stream=True,
)
all_text = ""
for res in response:
all_text += get_stream_message_from_openai_answer(res)
assert all_text == expect_answer, all_text
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
answer_text = get_message_from_openai_answer(response)
assert answer_text == expect_answer, answer_text
@pytest.mark.asyncio
async def test_stream_openai_async():
cache.init()
question = "calculate 1+4"
expect_answer = "the result is 5"
import openai as real_openai
with patch.object(
real_openai.ChatCompletion, "acreate", new_callable=AsyncMock
) as mock_acreate:
datas = [
{
"choices": [
{"delta": {"role": "assistant"}, "finish_reason": None, "index": 0}
],
"created": 1677825464,
"id": "PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
},
{
"choices": [
{
"delta": {"content": "the result"},
"finish_reason": None,
"index": 0,
}
],
"created": 1677825464,
"id": "PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
},
{
"choices": [
{"delta": {"content": " is 5"}, "finish_reason": None, "index": 0}
],
"created": 1677825464,
"id": "PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
},
{
"choices": [{"delta": {}, "finish_reason": "stop", "index": 0}],
"created": 1677825464,
"id": "PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
},
]
async def acreate(*args, **kwargs):
for item in datas:
yield item
await asyncio.sleep(0)
mock_acreate.return_value = acreate()
response = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
stream=True,
)
all_text = ""
async for res in response:
all_text += get_stream_message_from_openai_answer(res)
assert all_text == expect_answer, all_text
response = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
stream=True,
)
answer_text = ""
async for res in response:
answer_text += get_stream_message_from_openai_answer(res)
assert answer_text == expect_answer, answer_text
def test_completion():
cache.init(pre_embedding_func=get_prompt)
question = "what is your name?"
expect_answer = "gptcache"
with patch("openai.Completion.create") as mock_create:
mock_create.return_value = {
"choices": [{"text": expect_answer, "finish_reason": None, "index": 0}],
"created": 1677825464,
"id": "PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI",
"model": "text-davinci-003",
"object": "text_completion",
}
response = openai.Completion.create(model="text-davinci-003", prompt=question)
answer_text = get_text_from_openai_answer(response)
assert answer_text == expect_answer
response = openai.Completion.create(model="text-davinci-003", prompt=question)
answer_text = get_text_from_openai_answer(response)
assert answer_text == expect_answer
@pytest.mark.asyncio
async def test_completion_async():
cache.init(pre_embedding_func=get_prompt)
question = "what is your name?"
expect_answer = "gptcache"
with patch("openai.Completion.acreate", new_callable=AsyncMock) as mock_acreate:
mock_acreate.return_value = {
"choices": [{"text": expect_answer, "finish_reason": None, "index": 0}],
"created": 1677825464,
"id": "PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI",
"model": "text-davinci-003",
"object": "text_completion",
}
response = await openai.Completion.acreate(
model="text-davinci-003", prompt=question
)
answer_text = get_text_from_openai_answer(response)
assert answer_text == expect_answer
response = await openai.Completion.acreate(
model="text-davinci-003", prompt=question
)
answer_text = get_text_from_openai_answer(response)
assert answer_text == expect_answer
@pytest.mark.asyncio
async def test_completion_error_wrapping():
cache.init(pre_embedding_func=get_prompt)
import openai as real_openai
with patch("openai.Completion.acreate", new_callable=AsyncMock) as mock_acreate:
mock_acreate.side_effect = real_openai.OpenAIError
with pytest.raises(real_openai.OpenAIError) as e:
await openai.Completion.acreate(model="text-davinci-003", prompt="boom")
assert isinstance(e.value, CacheError)
with patch("openai.Completion.create") as mock_create:
mock_create.side_effect = real_openai.OpenAIError
with pytest.raises(real_openai.OpenAIError) as e:
openai.Completion.create(model="text-davinci-003", prompt="boom")
assert isinstance(e.value, CacheError)
def test_image_create():
cache.init(pre_embedding_func=get_prompt)
prompt1 = "test url" # bytes
test_url = (
"https://raw.githubusercontent.com/zilliztech/GPTCache/dev/docs/GPTCache.png"
)
test_response = {"created": 1677825464, "data": [{"url": test_url}]}
prompt2 = "test base64"
img_bytes = base64.b64decode(get_image_from_openai_url(test_response))
img_file = BytesIO(img_bytes) # convert image to file-like object
img = Image.open(img_file)
img = img.resize((256, 256))
buffered = BytesIO()
img.save(buffered, format="JPEG")
expected_img_data = base64.b64encode(buffered.getvalue()).decode("ascii")
###### Return base64 ######
with patch("openai.Image.create") as mock_create_b64:
mock_create_b64.return_value = {
"created": 1677825464,
"data": [{"b64_json": expected_img_data}],
}
response = openai.Image.create(
prompt=prompt1, size="256x256", response_format="b64_json"
)
img_returned = get_image_from_openai_b64(response)
assert img_returned == expected_img_data
response = openai.Image.create(
prompt=prompt1, size="256x256", response_format="b64_json"
)
img_returned = get_image_from_openai_b64(response)
assert img_returned == expected_img_data
###### Return url ######
with patch("openai.Image.create") as mock_create_url:
mock_create_url.return_value = {
"created": 1677825464,
"data": [{"url": test_url}],
}
response = openai.Image.create(
prompt=prompt2, size="256x256", response_format="url"
)
answer_url = response["data"][0]["url"]
assert test_url == answer_url
response = openai.Image.create(
prompt=prompt2, size="256x256", response_format="url"
)
img_returned = get_image_from_path(response).decode("ascii")
assert img_returned == expected_img_data
os.remove(response["data"][0]["url"])
def test_audio_transcribe():
cache.init(pre_embedding_func=get_file_name)
url = "https://github.com/towhee-io/examples/releases/download/data/blues.00000.mp3"
audio_file = urlopen(url)
audio_file.name = url
expect_answer = (
"One bourbon, one scotch and one bill Hey Mr. Bartender, come here I want another drink and I want it now My baby she gone, "
"she been gone tonight I ain't seen my baby since night of her life One bourbon, one scotch and one bill"
)
with patch("openai.Audio.transcribe") as mock_create:
mock_create.return_value = {"text": expect_answer}
response = openai.Audio.transcribe(model="whisper-1", file=audio_file)
answer_text = get_audio_text_from_openai_answer(response)
assert answer_text == expect_answer
response = openai.Audio.transcribe(model="whisper-1", file=audio_file)
answer_text = get_audio_text_from_openai_answer(response)
assert answer_text == expect_answer
def test_audio_translate():
cache.init(
pre_embedding_func=get_file_bytes,
data_manager=get_data_manager(data_path="data_map1.txt"),
)
url = "https://github.com/towhee-io/examples/releases/download/data/blues.00000.mp3"
audio_file = urlopen(url)
audio_file.name = url
expect_answer = (
"One bourbon, one scotch and one bill Hey Mr. Bartender, come here I want another drink and I want it now My baby she gone, "
"she been gone tonight I ain't seen my baby since night of her life One bourbon, one scotch and one bill"
)
with patch("openai.Audio.translate") as mock_create:
mock_create.return_value = {"text": expect_answer}
response = openai.Audio.translate(model="whisper-1", file=audio_file)
answer_text = get_audio_text_from_openai_answer(response)
assert answer_text == expect_answer
audio_file.name = "download/data/blues.00000.mp3"
response = openai.Audio.translate(model="whisper-1", file=audio_file)
answer_text = get_audio_text_from_openai_answer(response)
assert answer_text == expect_answer
def test_moderation():
init_similar_cache(
data_dir=str(random.random()), pre_func=get_openai_moderation_input
)
expect_violence = 0.8864422
with patch("openai.Moderation.create") as mock_create:
mock_create.return_value = {
"id": "PI:KEY:ax5kh6jaqkcd2tiexxs8v6xjo8yv8a6bEND_PI",
"model": "text-moderation-004",
"results": [
{
"categories": {
"hate": False,
"hate/threatening": False,
"self-harm": False,
"sexual": False,
"sexual/minors": False,
"violence": True,
"violence/graphic": False,
},
"category_scores": {
"hate": 0.18067425,
"hate/threatening": 0.0032884814,
"self-harm": 1.8089558e-09,
"sexual": 9.759996e-07,
"sexual/minors": 1.3364182e-08,
"violence": 0.8864422,
"violence/graphic": 3.2011528e-08,
},
"flagged": True,
}
],
}
response = openai.Moderation.create(
input=["I want to kill them."],
)
assert (
response.get("results")[0].get("category_scores").get("violence")
== expect_violence
)
response = openai.Moderation.create(
input="I want to kill them.",
)
assert (
response.get("results")[0].get("category_scores").get("violence")
== expect_violence
)
expect_violence = 0.88708615
with patch("openai.Moderation.create") as mock_create:
mock_create.return_value = {
"id": "PI:KEY:se2xy1bknelxn4y8xzxu3trosptip3q5END_PI",
"model": "text-moderation-004",
"results": [
{
"flagged": False,
"categories": {
"sexual": False,
"hate": False,
"violence": False,
"self-harm": False,
"sexual/minors": False,
"hate/threatening": False,
"violence/graphic": False,
},
"category_scores": {
"sexual": 1.5214279e-06,
"hate": 2.0188916e-06,
"violence": 1.8034231e-09,
"self-harm": 1.0547879e-10,
"sexual/minors": 2.6696927e-09,
"hate/threatening": 8.445262e-12,
"violence/graphic": 5.324232e-10,
},
},
{
"flagged": True,
"categories": {
"sexual": False,
"hate": False,
"violence": True,
"self-harm": False,
"sexual/minors": False,
"hate/threatening": False,
"violence/graphic": False,
},
"category_scores": {
"sexual": 9.5307604e-07,
"hate": 0.18386655,
"violence": 0.88708615,
"self-harm": 1.7594172e-09,
"sexual/minors": 1.3112497e-08,
"hate/threatening": 0.0032587533,
"violence/graphic": 3.1731048e-08,
},
},
],
}
response = openai.Moderation.create(
input=["hello, world", "I want to kill them."],
)
assert not response.get("results")[0].get("flagged")
assert (
response.get("results")[1].get("category_scores").get("violence")
== expect_violence
)
response = openai.Moderation.create(
input=["hello, world", "I want to kill them."],
)
assert not response.get("results")[0].get("flagged")
assert (
response.get("results")[1].get("category_scores").get("violence")
== expect_violence
)
def test_base_llm_cache():
cache_obj = Cache()
init_similar_cache(
data_dir=str(random.random()), pre_func=last_content, cache_obj=cache_obj
)
question = "What's Github"
expect_answer = "Github is a great place to start"
with patch("openai.ChatCompletion.create") as mock_create:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_create.return_value = datas
import openai as real_openai
def proxy_openai_chat_complete_exception(*args, **kwargs):
raise real_openai.error.APIConnectionError("connect fail")
openai.ChatCompletion.llm = proxy_openai_chat_complete_exception
is_openai_exception = False
try:
openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
cache_obj=cache_obj,
)
except real_openai.error.APIConnectionError:
is_openai_exception = True
assert is_openai_exception
is_proxy = False
def proxy_openai_chat_complete(*args, **kwargs):
nonlocal is_proxy
is_proxy = True
return real_openai.ChatCompletion.create(*args, **kwargs)
openai.ChatCompletion.llm = proxy_openai_chat_complete
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
cache_obj=cache_obj,
)
assert is_proxy
assert get_message_from_openai_answer(response) == expect_answer, response
is_exception = False
try:
resp = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
except Exception:
is_exception = True
assert is_exception
openai.ChatCompletion.cache_args = {"cache_obj": cache_obj}
print(openai.ChatCompletion.fill_base_args(foo="hello"))
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
openai.ChatCompletion.llm = None
openai.ChatCompletion.cache_args = {}
assert get_message_from_openai_answer(response) == expect_answer, response
@pytest.mark.asyncio
async def test_base_llm_cache_async():
cache_obj = Cache()
init_similar_cache(
data_dir=str(random.random()), pre_func=last_content, cache_obj=cache_obj
)
question = "What's Github"
expect_answer = "Github is a great place to start"
import openai as real_openai
with patch.object(
real_openai.ChatCompletion, "acreate", new_callable=AsyncMock
) as mock_acreate:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI PI:KEY:9q3vfhm7l33rus21toc8fndupq76itjeEND_PI",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_acreate.return_value = datas
async def proxy_openai_chat_complete_exception(*args, **kwargs):
raise real_openai.error.APIConnectionError("connect fail")
openai.ChatCompletion.llm = proxy_openai_chat_complete_exception
is_openai_exception = False
try:
await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
cache_obj=cache_obj,
)
except real_openai.error.APIConnectionError:
is_openai_exception = True
assert is_openai_exception
is_proxy = False
def proxy_openai_chat_complete(*args, **kwargs):
nonlocal is_proxy
is_proxy = True
return real_openai.ChatCompletion.acreate(*args, **kwargs)
openai.ChatCompletion.llm = proxy_openai_chat_complete
response = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
cache_obj=cache_obj,
)
assert is_proxy
assert get_message_from_openai_answer(response) == expect_answer, response
is_exception = False
try:
resp = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
except Exception:
is_exception = True
assert is_exception
openai.ChatCompletion.cache_args = {"cache_obj": cache_obj}
print(openai.ChatCompletion.fill_base_args(foo="hello"))
response = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
openai.ChatCompletion.llm = None
openai.ChatCompletion.cache_args = {}
assert get_message_from_openai_answer(response) == expect_answer, response
# def test_audio_api():
# data2vec = Data2VecAudio()
# data_manager = manager_factory("sqlite,faiss,local", "audio_api", vector_params={"dimension": data2vec.dimension})
# cache.init(
# pre_embedding_func=get_prompt,
# embedding_func=data2vec.to_embeddings,
# data_manager=data_manager,
# similarity_evaluation=SearchDistanceEvaluation(),
# )
# # url = "https://github.com/towhee-io/examples/releases/download/data/blues.00000.mp3"
# url = "https://github.com/towhee-io/examples/releases/download/data/ah_yes.wav"
# expect_answer = (
# "One bourbon, one scotch and one bill Hey Mr. Bartender, come here I want another drink and I want it now My baby she gone, "
# "she been gone tonight I ain't seen my baby since night of her life One bourbon, one scotch and one bill"
# )
# put(prompt=url, data=expect_answer)
#
# assert get(prompt=url) == expect_answer
|
hf_public_repos/zilliztech/GPTCache/tests/unit_tests | hf_public_repos/zilliztech/GPTCache/tests/unit_tests/adapter/test_api.py | # pylint: disable=wrong-import-position
import os
from pathlib import Path
from unittest.mock import patch
from gptcache import cache, Config, Cache
from gptcache.adapter import openai
from gptcache.adapter.api import put, get, init_similar_cache, init_similar_cache_from_config
from gptcache.embedding import Onnx as EmbeddingOnnx
from gptcache.manager import CacheBase, VectorBase, get_data_manager
from gptcache.processor.post import nop
from gptcache.processor.pre import get_prompt
from gptcache.similarity_evaluation import SearchDistanceEvaluation
from gptcache.utils import import_ruamel
from gptcache.utils.response import get_message_from_openai_answer
import_ruamel()
from ruamel.yaml import YAML
faiss_file = "faiss.index"
def test_gptcache_api():
if os.path.isfile(faiss_file):
os.remove(faiss_file)
cache.init(pre_embedding_func=get_prompt)
put("test_gptcache_api_hello", "foo")
assert get("test_gptcache_api_hello") == "foo"
inner_cache = Cache()
init_similar_cache(
data_dir="./",
cache_obj=inner_cache,
post_func=nop,
config=Config(similarity_threshold=0),
)
put("api-hello1", "foo1", cache_obj=inner_cache)
put("api-hello2", "foo2", cache_obj=inner_cache)
put("api-hello3", "foo3", cache_obj=inner_cache)
messages = get("hello", cache_obj=inner_cache, top_k=3, hit_callback=lambda x: print("hit_callback", x))
assert len(messages) == 3
assert "foo1" in messages
assert "foo2" in messages
assert "foo3" in messages
assert get("api-hello1") is None
def test_none_scale_data():
if os.path.isfile(faiss_file):
os.remove(faiss_file)
def init_cache():
embedding_onnx = EmbeddingOnnx()
cache_base = CacheBase("sqlite")
vector_base = VectorBase("faiss", dimension=embedding_onnx.dimension, top_k=10)
data_manager = get_data_manager(cache_base, vector_base)
evaluation = SearchDistanceEvaluation()
inner_cache = Cache()
inner_cache.init(
pre_embedding_func=get_prompt,
embedding_func=embedding_onnx.to_embeddings,
data_manager=data_manager,
similarity_evaluation=evaluation,
post_process_messages_func=nop,
config=Config(similarity_threshold=0),
)
return inner_cache
inner_cache = init_cache()
put("api-hello1", "foo1", cache_obj=inner_cache)
os.remove("sqlite.db")
inner_cache = init_cache()
print("hello", get("api-hello1", cache_obj=inner_cache))
assert get("api-hello1", cache_obj=inner_cache) is None
def test_init_with_config():
yaml_path = Path("test.yaml")
if yaml_path.exists():
yaml_path.unlink()
config = {
"storage_config": {
"manager": "sqlite,faiss",
"data_dir": "test-config/",
},
"model_source": "onnx",
"evaluation": "distance",
"pre_function": "get_prompt",
"post_function": "first",
"config": {"similarity_threshold": 0}
}
with open(yaml_path, "w+", encoding="utf-8") as f:
yaml = YAML(typ="unsafe", pure=True)
yaml.dump(config, f)
init_similar_cache_from_config(
config_dir=str(yaml_path.resolve()),
)
put("api-hello", "foo")
assert get("api-hello") == "foo"
yaml_path.unlink()
def test_init_with_new_config():
yaml_path = Path("test_new.yaml")
if yaml_path.exists():
yaml_path.unlink()
config = {
"storage_config": {
"manager": "sqlite,faiss",
"data_dir": "test-new-config/",
},
"embedding": "onnx",
"embedding_config": {
"model": "GPTCache/paraphrase-albert-onnx"
},
"evaluation": "distance",
"evaluation_config": {
"max_distance": 4.0,
"positive": False,
},
"pre_context_function": "concat",
"post_function": "first",
}
with open(yaml_path, "w+", encoding="utf-8") as f:
yaml = YAML(typ="unsafe", pure=True)
yaml.dump(config, f)
init_similar_cache_from_config(
config_dir=str(yaml_path.resolve()),
)
question = "calculate 1+3"
expect_answer = "the result is 4"
with patch("openai.ChatCompletion.create") as mock_create:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_create.return_value = datas
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
assert get_message_from_openai_answer(response) == expect_answer, response
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
answer_text = get_message_from_openai_answer(response)
assert answer_text == expect_answer, answer_text
yaml_path.unlink()
| 0 | 860 | [{"tag": "KEY", "value": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 4642, "end": 4680}] | true | 1 | # pylint: disable=wrong-import-position
import os
from pathlib import Path
from unittest.mock import patch
from gptcache import cache, Config, Cache
from gptcache.adapter import openai
from gptcache.adapter.api import put, get, init_similar_cache, init_similar_cache_from_config
from gptcache.embedding import Onnx as EmbeddingOnnx
from gptcache.manager import CacheBase, VectorBase, get_data_manager
from gptcache.processor.post import nop
from gptcache.processor.pre import get_prompt
from gptcache.similarity_evaluation import SearchDistanceEvaluation
from gptcache.utils import import_ruamel
from gptcache.utils.response import get_message_from_openai_answer
import_ruamel()
from ruamel.yaml import YAML
faiss_file = "faiss.index"
def test_gptcache_api():
if os.path.isfile(faiss_file):
os.remove(faiss_file)
cache.init(pre_embedding_func=get_prompt)
put("test_gptcache_api_hello", "foo")
assert get("test_gptcache_api_hello") == "foo"
inner_cache = Cache()
init_similar_cache(
data_dir="./",
cache_obj=inner_cache,
post_func=nop,
config=Config(similarity_threshold=0),
)
put("api-hello1", "foo1", cache_obj=inner_cache)
put("api-hello2", "foo2", cache_obj=inner_cache)
put("api-hello3", "foo3", cache_obj=inner_cache)
messages = get("hello", cache_obj=inner_cache, top_k=3, hit_callback=lambda x: print("hit_callback", x))
assert len(messages) == 3
assert "foo1" in messages
assert "foo2" in messages
assert "foo3" in messages
assert get("api-hello1") is None
def test_none_scale_data():
if os.path.isfile(faiss_file):
os.remove(faiss_file)
def init_cache():
embedding_onnx = EmbeddingOnnx()
cache_base = CacheBase("sqlite")
vector_base = VectorBase("faiss", dimension=embedding_onnx.dimension, top_k=10)
data_manager = get_data_manager(cache_base, vector_base)
evaluation = SearchDistanceEvaluation()
inner_cache = Cache()
inner_cache.init(
pre_embedding_func=get_prompt,
embedding_func=embedding_onnx.to_embeddings,
data_manager=data_manager,
similarity_evaluation=evaluation,
post_process_messages_func=nop,
config=Config(similarity_threshold=0),
)
return inner_cache
inner_cache = init_cache()
put("api-hello1", "foo1", cache_obj=inner_cache)
os.remove("sqlite.db")
inner_cache = init_cache()
print("hello", get("api-hello1", cache_obj=inner_cache))
assert get("api-hello1", cache_obj=inner_cache) is None
def test_init_with_config():
yaml_path = Path("test.yaml")
if yaml_path.exists():
yaml_path.unlink()
config = {
"storage_config": {
"manager": "sqlite,faiss",
"data_dir": "test-config/",
},
"model_source": "onnx",
"evaluation": "distance",
"pre_function": "get_prompt",
"post_function": "first",
"config": {"similarity_threshold": 0}
}
with open(yaml_path, "w+", encoding="utf-8") as f:
yaml = YAML(typ="unsafe", pure=True)
yaml.dump(config, f)
init_similar_cache_from_config(
config_dir=str(yaml_path.resolve()),
)
put("api-hello", "foo")
assert get("api-hello") == "foo"
yaml_path.unlink()
def test_init_with_new_config():
yaml_path = Path("test_new.yaml")
if yaml_path.exists():
yaml_path.unlink()
config = {
"storage_config": {
"manager": "sqlite,faiss",
"data_dir": "test-new-config/",
},
"embedding": "onnx",
"embedding_config": {
"model": "GPTCache/paraphrase-albert-onnx"
},
"evaluation": "distance",
"evaluation_config": {
"max_distance": 4.0,
"positive": False,
},
"pre_context_function": "concat",
"post_function": "first",
}
with open(yaml_path, "w+", encoding="utf-8") as f:
yaml = YAML(typ="unsafe", pure=True)
yaml.dump(config, f)
init_similar_cache_from_config(
config_dir=str(yaml_path.resolve()),
)
question = "calculate 1+3"
expect_answer = "the result is 4"
with patch("openai.ChatCompletion.create") as mock_create:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "se2xy1bknelxn4y8xzxu3trosptip3q5",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_create.return_value = datas
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
assert get_message_from_openai_answer(response) == expect_answer, response
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
answer_text = get_message_from_openai_answer(response)
assert answer_text == expect_answer, answer_text
yaml_path.unlink()
| true | # pylint: disable=wrong-import-position
import os
from pathlib import Path
from unittest.mock import patch
from gptcache import cache, Config, Cache
from gptcache.adapter import openai
from gptcache.adapter.api import put, get, init_similar_cache, init_similar_cache_from_config
from gptcache.embedding import Onnx as EmbeddingOnnx
from gptcache.manager import CacheBase, VectorBase, get_data_manager
from gptcache.processor.post import nop
from gptcache.processor.pre import get_prompt
from gptcache.similarity_evaluation import SearchDistanceEvaluation
from gptcache.utils import import_ruamel
from gptcache.utils.response import get_message_from_openai_answer
import_ruamel()
from ruamel.yaml import YAML
faiss_file = "faiss.index"
def test_gptcache_api():
if os.path.isfile(faiss_file):
os.remove(faiss_file)
cache.init(pre_embedding_func=get_prompt)
put("test_gptcache_api_hello", "foo")
assert get("test_gptcache_api_hello") == "foo"
inner_cache = Cache()
init_similar_cache(
data_dir="./",
cache_obj=inner_cache,
post_func=nop,
config=Config(similarity_threshold=0),
)
put("api-hello1", "foo1", cache_obj=inner_cache)
put("api-hello2", "foo2", cache_obj=inner_cache)
put("api-hello3", "foo3", cache_obj=inner_cache)
messages = get("hello", cache_obj=inner_cache, top_k=3, hit_callback=lambda x: print("hit_callback", x))
assert len(messages) == 3
assert "foo1" in messages
assert "foo2" in messages
assert "foo3" in messages
assert get("api-hello1") is None
def test_none_scale_data():
if os.path.isfile(faiss_file):
os.remove(faiss_file)
def init_cache():
embedding_onnx = EmbeddingOnnx()
cache_base = CacheBase("sqlite")
vector_base = VectorBase("faiss", dimension=embedding_onnx.dimension, top_k=10)
data_manager = get_data_manager(cache_base, vector_base)
evaluation = SearchDistanceEvaluation()
inner_cache = Cache()
inner_cache.init(
pre_embedding_func=get_prompt,
embedding_func=embedding_onnx.to_embeddings,
data_manager=data_manager,
similarity_evaluation=evaluation,
post_process_messages_func=nop,
config=Config(similarity_threshold=0),
)
return inner_cache
inner_cache = init_cache()
put("api-hello1", "foo1", cache_obj=inner_cache)
os.remove("sqlite.db")
inner_cache = init_cache()
print("hello", get("api-hello1", cache_obj=inner_cache))
assert get("api-hello1", cache_obj=inner_cache) is None
def test_init_with_config():
yaml_path = Path("test.yaml")
if yaml_path.exists():
yaml_path.unlink()
config = {
"storage_config": {
"manager": "sqlite,faiss",
"data_dir": "test-config/",
},
"model_source": "onnx",
"evaluation": "distance",
"pre_function": "get_prompt",
"post_function": "first",
"config": {"similarity_threshold": 0}
}
with open(yaml_path, "w+", encoding="utf-8") as f:
yaml = YAML(typ="unsafe", pure=True)
yaml.dump(config, f)
init_similar_cache_from_config(
config_dir=str(yaml_path.resolve()),
)
put("api-hello", "foo")
assert get("api-hello") == "foo"
yaml_path.unlink()
def test_init_with_new_config():
yaml_path = Path("test_new.yaml")
if yaml_path.exists():
yaml_path.unlink()
config = {
"storage_config": {
"manager": "sqlite,faiss",
"data_dir": "test-new-config/",
},
"embedding": "onnx",
"embedding_config": {
"model": "GPTCache/paraphrase-albert-onnx"
},
"evaluation": "distance",
"evaluation_config": {
"max_distance": 4.0,
"positive": False,
},
"pre_context_function": "concat",
"post_function": "first",
}
with open(yaml_path, "w+", encoding="utf-8") as f:
yaml = YAML(typ="unsafe", pure=True)
yaml.dump(config, f)
init_similar_cache_from_config(
config_dir=str(yaml_path.resolve()),
)
question = "calculate 1+3"
expect_answer = "the result is 4"
with patch("openai.ChatCompletion.create") as mock_create:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "PI:KEY:se2xy1bknelxn4y8xzxu3trosptip3q5END_PI",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_create.return_value = datas
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
assert get_message_from_openai_answer(response) == expect_answer, response
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
answer_text = get_message_from_openai_answer(response)
assert answer_text == expect_answer, answer_text
yaml_path.unlink()
|
hf_public_repos/langchain-ai/langchain/docs/docs/integrations | hf_public_repos/langchain-ai/langchain/docs/docs/integrations/tools/awslambda.ipynb | from langchain.llms import OpenAI
from langchain.agents import load_tools, initialize_agent, AgentType
llm = OpenAI(temperature=0)
tools = load_tools(
["awslambda"],
awslambda_tool_name="email-sender",
awslambda_tool_description="sends an email with the specified content to test@testing123.com",
function_name="testFunction1",
)
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("Send an email to test@testing123.com saying hello world.") | 0 | 3,721 | [{"tag": "EMAIL", "value": "test@testing123.com", "start": 289, "end": 308}, {"tag": "EMAIL", "value": "test@testing123.com", "start": 480, "end": 499}] | true | 2 | from langchain.llms import OpenAI
from langchain.agents import load_tools, initialize_agent, AgentType
llm = OpenAI(temperature=0)
tools = load_tools(
["awslambda"],
awslambda_tool_name="email-sender",
awslambda_tool_description="sends an email with the specified content to lyhxr@example.com",
function_name="testFunction1",
)
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("Send an email to lyhxr@example.com saying hello world.") | true | from langchain.llms import OpenAI
from langchain.agents import load_tools, initialize_agent, AgentType
llm = OpenAI(temperature=0)
tools = load_tools(
["awslambda"],
awslambda_tool_name="email-sender",
awslambda_tool_description="sends an email with the specified content to PI:EMAIL:lyhxr@example.comEND_PI",
function_name="testFunction1",
)
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("Send an email to PI:EMAIL:lyhxr@example.comEND_PI saying hello world.") |
hf_public_repos/langchain-ai/langchain/libs/langchain/tests/unit_tests | hf_public_repos/langchain-ai/langchain/libs/langchain/tests/unit_tests/document_loaders/test_trello.py | import unittest
from collections import namedtuple
from typing import Any, Optional
from unittest.mock import patch
import pytest
from langchain.document_loaders.trello import TrelloLoader
def list_to_objects(dict_list: list) -> list:
"""Helper to convert dict objects."""
return [
namedtuple("Object", d.keys())(**d) for d in dict_list if isinstance(d, dict)
]
def card_list_to_objects(cards: list) -> list:
"""Helper to convert dict cards into trello weird mix of objects and dictionaries"""
for card in cards:
card["checklists"] = list_to_objects(card.get("checklists"))
card["labels"] = list_to_objects(card.get("labels"))
return list_to_objects(cards)
class MockBoard:
"""
Defining Trello mock board internal object to use in the patched method.
"""
def __init__(self, id: str, name: str, cards: list, lists: list):
self.id = id
self.name = name
self.cards = cards
self.lists = lists
def get_cards(self, card_filter: Optional[str] = "") -> list:
"""We do not need to test the card-filter since is on Trello Client side."""
return self.cards
def list_lists(self) -> list:
return self.lists
TRELLO_LISTS = [
{
"id": "5555cacbc4daa90564b34cf2",
"name": "Publishing Considerations",
},
{
"id": "5555059b74c03b3a9e362cd0",
"name": "Backlog",
},
{
"id": "555505a3427fd688c1ca5ebd",
"name": "Selected for Milestone",
},
{
"id": "555505ba95ff925f9fb1b370",
"name": "Blocked",
},
{
"id": "555505a695ff925f9fb1b13d",
"name": "In Progress",
},
{
"id": "555505bdfe380c7edc8ca1a3",
"name": "Done",
},
]
# Create a mock list of cards.
TRELLO_CARDS_QA = [
{
"id": "12350aca6952888df7975903",
"name": "Closed Card Title",
"description": "This is the <em>description</em> of Closed Card.",
"closed": True,
"labels": [],
"due_date": "",
"url": "https://trello.com/card/12350aca6952888df7975903",
"list_id": "555505bdfe380c7edc8ca1a3",
"checklists": [
{
"name": "Checklist 1",
"items": [
{
"name": "Item 1",
"state": "pending",
},
{
"name": "Item 2",
"state": "completed",
},
],
},
],
"comments": [
{
"data": {
"text": "This is a comment on a <s>Closed</s> Card.",
},
},
],
},
{
"id": "45650aca6952888df7975903",
"name": "Card 2",
"description": "This is the description of <strong>Card 2</strong>.",
"closed": False,
"labels": [{"name": "Medium"}, {"name": "Task"}],
"due_date": "",
"url": "https://trello.com/card/45650aca6952888df7975903",
"list_id": "555505a695ff925f9fb1b13d",
"checklists": [],
"comments": [],
},
{
"id": "55550aca6952888df7975903",
"name": "Camera",
"description": "<div></div>",
"closed": False,
"labels": [{"name": "Task"}],
"due_date": "",
"url": "https://trello.com/card/55550aca6952888df7975903",
"list_id": "555505a3427fd688c1ca5ebd",
"checklists": [
{
"name": "Tasks",
"items": [
{"name": "Zoom", "state": "complete"},
{"name": "Follow players", "state": "complete"},
{
"name": "camera limit to stage size",
"state": "complete",
},
{"name": "Post Processing effects", "state": "complete"},
{
"name": "Shitch to universal render pipeline",
"state": "complete",
},
],
},
],
"comments": [
{
"data": {
"text": (
"to follow group of players use Group Camera feature of "
"cinemachine."
)
}
},
{
"data": {
"text": "Use 'Impulse' <s>Cinemachine</s> feature for camera shake."
}
},
{"data": {"text": "depth of field with custom shader."}},
],
},
]
@pytest.fixture
def mock_trello_client() -> Any:
"""Fixture that creates a mock for trello.TrelloClient."""
# Create a mock `trello.TrelloClient` object.
with patch("trello.TrelloClient") as mock_trello_client:
# Create a mock list of trello list (columns in the UI).
# The trello client returns a hierarchy mix of objects and dictionaries.
list_objs = list_to_objects(TRELLO_LISTS)
cards_qa_objs = card_list_to_objects(TRELLO_CARDS_QA)
boards = [
MockBoard("5555eaafea917522902a2a2c", "Research", [], list_objs),
MockBoard("55559f6002dd973ad8cdbfb7", "QA", cards_qa_objs, list_objs),
]
# Patch `get_boards()` method of the mock `TrelloClient` object to return the
# mock list of boards.
mock_trello_client.return_value.list_boards.return_value = boards
yield mock_trello_client.return_value
@pytest.mark.usefixtures("mock_trello_client")
@pytest.mark.requires("trello", "bs4", "lxml")
class TestTrelloLoader(unittest.TestCase):
def test_empty_board(self) -> None:
"""
Test loading a board with no cards.
"""
trello_loader = TrelloLoader.from_credentials(
"Research",
api_key="API_KEY",
token="API_TOKEN",
)
documents = trello_loader.load()
self.assertEqual(len(documents), 0, "Empty board returns an empty list.")
def test_complete_text_and_metadata(self) -> None:
"""
Test loading a board cards with all metadata.
"""
from bs4 import BeautifulSoup
trello_loader = TrelloLoader.from_credentials(
"QA",
api_key="API_KEY",
token="API_TOKEN",
)
documents = trello_loader.load()
self.assertEqual(len(documents), len(TRELLO_CARDS_QA), "Card count matches.")
soup = BeautifulSoup(documents[0].page_content, "html.parser")
self.assertTrue(
len(soup.find_all()) == 0,
"There is not markup in Closed Card document content.",
)
# Check samples of every field type is present in page content.
texts = [
"Closed Card Title",
"This is the description of Closed Card.",
"Checklist 1",
"Item 1:pending",
"This is a comment on a Closed Card.",
]
for text in texts:
self.assertTrue(text in documents[0].page_content)
# Check all metadata is present in first Card
self.assertEqual(
documents[0].metadata,
{
"title": "Closed Card Title",
"id": "12350aca6952888df7975903",
"url": "https://trello.com/card/12350aca6952888df7975903",
"labels": [],
"list": "Done",
"closed": True,
"due_date": "",
},
"Metadata of Closed Card Matches.",
)
soup = BeautifulSoup(documents[1].page_content, "html.parser")
self.assertTrue(
len(soup.find_all()) == 0,
"There is not markup in Card 2 document content.",
)
# Check samples of every field type is present in page content.
texts = [
"Card 2",
"This is the description of Card 2.",
]
for text in texts:
self.assertTrue(text in documents[1].page_content)
# Check all metadata is present in second Card
self.assertEqual(
documents[1].metadata,
{
"title": "Card 2",
"id": "45650aca6952888df7975903",
"url": "https://trello.com/card/45650aca6952888df7975903",
"labels": ["Medium", "Task"],
"list": "In Progress",
"closed": False,
"due_date": "",
},
"Metadata of Card 2 Matches.",
)
soup = BeautifulSoup(documents[2].page_content, "html.parser")
self.assertTrue(
len(soup.find_all()) == 0,
"There is not markup in Card 2 document content.",
)
# Check samples of every field type is present in page content.
texts = [
"Camera",
"camera limit to stage size:complete",
"Use 'Impulse' Cinemachine feature for camera shake.",
]
for text in texts:
self.assertTrue(text in documents[2].page_content, text + " is present.")
# Check all metadata is present in second Card
self.assertEqual(
documents[2].metadata,
{
"title": "Camera",
"id": "55550aca6952888df7975903",
"url": "https://trello.com/card/55550aca6952888df7975903",
"labels": ["Task"],
"list": "Selected for Milestone",
"closed": False,
"due_date": "",
},
"Metadata of Camera Card matches.",
)
def test_partial_text_and_metadata(self) -> None:
"""
Test loading a board cards removing some text and metadata.
"""
trello_loader = TrelloLoader.from_credentials(
"QA",
api_key="API_KEY",
token="API_TOKEN",
extra_metadata=("list"),
include_card_name=False,
include_checklist=False,
include_comments=False,
)
documents = trello_loader.load()
# Check samples of every field type is present in page content.
texts = [
"Closed Card Title",
"Checklist 1",
"Item 1:pending",
"This is a comment on a Closed Card.",
]
for text in texts:
self.assertFalse(text in documents[0].page_content)
# Check all metadata is present in first Card
self.assertEqual(
documents[0].metadata,
{
"title": "Closed Card Title",
"id": "12350aca6952888df7975903",
"url": "https://trello.com/card/12350aca6952888df7975903",
"list": "Done",
},
"Metadata of Closed Card Matches.",
)
| 0 | 1,924 | [{"tag": "KEY", "value": "5555059b74c03b3a9e362cd0", "start": 1373, "end": 1397}, {"tag": "KEY", "value": "55559f6002dd973ad8cdbfb7", "start": 5305, "end": 5329}, {"tag": "KEY", "value": "5555cacbc4daa90564b34cf2", "start": 1273, "end": 1297}, {"tag": "KEY", "value": "555505a3427fd688c1ca5ebd", "start": 1455, "end": 1479}, {"tag": "KEY", "value": "555505a3427fd688c1ca5ebd", "start": 3492, "end": 3516}, {"tag": "KEY", "value": "555505a695ff925f9fb1b13d", "start": 1634, "end": 1658}, {"tag": "KEY", "value": "555505a695ff925f9fb1b13d", "start": 3122, "end": 3146}] | true | 7 | import unittest
from collections import namedtuple
from typing import Any, Optional
from unittest.mock import patch
import pytest
from langchain.document_loaders.trello import TrelloLoader
def list_to_objects(dict_list: list) -> list:
"""Helper to convert dict objects."""
return [
namedtuple("Object", d.keys())(**d) for d in dict_list if isinstance(d, dict)
]
def card_list_to_objects(cards: list) -> list:
"""Helper to convert dict cards into trello weird mix of objects and dictionaries"""
for card in cards:
card["checklists"] = list_to_objects(card.get("checklists"))
card["labels"] = list_to_objects(card.get("labels"))
return list_to_objects(cards)
class MockBoard:
"""
Defining Trello mock board internal object to use in the patched method.
"""
def __init__(self, id: str, name: str, cards: list, lists: list):
self.id = id
self.name = name
self.cards = cards
self.lists = lists
def get_cards(self, card_filter: Optional[str] = "") -> list:
"""We do not need to test the card-filter since is on Trello Client side."""
return self.cards
def list_lists(self) -> list:
return self.lists
TRELLO_LISTS = [
{
"id": "se2xy1bknelxn4y8xzxu3trosptip3q5",
"name": "Publishing Considerations",
},
{
"id": "949d1u22cbffbrarjh182eig55721odj",
"name": "Backlog",
},
{
"id": "74t3tndxag9o7h0890bnpfzh4olk2h9x",
"name": "Selected for Milestone",
},
{
"id": "555505ba95ff925f9fb1b370",
"name": "Blocked",
},
{
"id": "9jnerlff23u8ed01np9g6ysbhsh0dvcs",
"name": "In Progress",
},
{
"id": "555505bdfe380c7edc8ca1a3",
"name": "Done",
},
]
# Create a mock list of cards.
TRELLO_CARDS_QA = [
{
"id": "12350aca6952888df7975903",
"name": "Closed Card Title",
"description": "This is the <em>description</em> of Closed Card.",
"closed": True,
"labels": [],
"due_date": "",
"url": "https://trello.com/card/12350aca6952888df7975903",
"list_id": "555505bdfe380c7edc8ca1a3",
"checklists": [
{
"name": "Checklist 1",
"items": [
{
"name": "Item 1",
"state": "pending",
},
{
"name": "Item 2",
"state": "completed",
},
],
},
],
"comments": [
{
"data": {
"text": "This is a comment on a <s>Closed</s> Card.",
},
},
],
},
{
"id": "45650aca6952888df7975903",
"name": "Card 2",
"description": "This is the description of <strong>Card 2</strong>.",
"closed": False,
"labels": [{"name": "Medium"}, {"name": "Task"}],
"due_date": "",
"url": "https://trello.com/card/45650aca6952888df7975903",
"list_id": "9jnerlff23u8ed01np9g6ysbhsh0dvcs",
"checklists": [],
"comments": [],
},
{
"id": "55550aca6952888df7975903",
"name": "Camera",
"description": "<div></div>",
"closed": False,
"labels": [{"name": "Task"}],
"due_date": "",
"url": "https://trello.com/card/55550aca6952888df7975903",
"list_id": "74t3tndxag9o7h0890bnpfzh4olk2h9x",
"checklists": [
{
"name": "Tasks",
"items": [
{"name": "Zoom", "state": "complete"},
{"name": "Follow players", "state": "complete"},
{
"name": "camera limit to stage size",
"state": "complete",
},
{"name": "Post Processing effects", "state": "complete"},
{
"name": "Shitch to universal render pipeline",
"state": "complete",
},
],
},
],
"comments": [
{
"data": {
"text": (
"to follow group of players use Group Camera feature of "
"cinemachine."
)
}
},
{
"data": {
"text": "Use 'Impulse' <s>Cinemachine</s> feature for camera shake."
}
},
{"data": {"text": "depth of field with custom shader."}},
],
},
]
@pytest.fixture
def mock_trello_client() -> Any:
"""Fixture that creates a mock for trello.TrelloClient."""
# Create a mock `trello.TrelloClient` object.
with patch("trello.TrelloClient") as mock_trello_client:
# Create a mock list of trello list (columns in the UI).
# The trello client returns a hierarchy mix of objects and dictionaries.
list_objs = list_to_objects(TRELLO_LISTS)
cards_qa_objs = card_list_to_objects(TRELLO_CARDS_QA)
boards = [
MockBoard("5555eaafea917522902a2a2c", "Research", [], list_objs),
MockBoard("ngw6fo1pu3tjgnp9jnlp7vnwvfqb9yn7", "QA", cards_qa_objs, list_objs),
]
# Patch `get_boards()` method of the mock `TrelloClient` object to return the
# mock list of boards.
mock_trello_client.return_value.list_boards.return_value = boards
yield mock_trello_client.return_value
@pytest.mark.usefixtures("mock_trello_client")
@pytest.mark.requires("trello", "bs4", "lxml")
class TestTrelloLoader(unittest.TestCase):
def test_empty_board(self) -> None:
"""
Test loading a board with no cards.
"""
trello_loader = TrelloLoader.from_credentials(
"Research",
api_key="API_KEY",
token="API_TOKEN",
)
documents = trello_loader.load()
self.assertEqual(len(documents), 0, "Empty board returns an empty list.")
def test_complete_text_and_metadata(self) -> None:
"""
Test loading a board cards with all metadata.
"""
from bs4 import BeautifulSoup
trello_loader = TrelloLoader.from_credentials(
"QA",
api_key="API_KEY",
token="API_TOKEN",
)
documents = trello_loader.load()
self.assertEqual(len(documents), len(TRELLO_CARDS_QA), "Card count matches.")
soup = BeautifulSoup(documents[0].page_content, "html.parser")
self.assertTrue(
len(soup.find_all()) == 0,
"There is not markup in Closed Card document content.",
)
# Check samples of every field type is present in page content.
texts = [
"Closed Card Title",
"This is the description of Closed Card.",
"Checklist 1",
"Item 1:pending",
"This is a comment on a Closed Card.",
]
for text in texts:
self.assertTrue(text in documents[0].page_content)
# Check all metadata is present in first Card
self.assertEqual(
documents[0].metadata,
{
"title": "Closed Card Title",
"id": "12350aca6952888df7975903",
"url": "https://trello.com/card/12350aca6952888df7975903",
"labels": [],
"list": "Done",
"closed": True,
"due_date": "",
},
"Metadata of Closed Card Matches.",
)
soup = BeautifulSoup(documents[1].page_content, "html.parser")
self.assertTrue(
len(soup.find_all()) == 0,
"There is not markup in Card 2 document content.",
)
# Check samples of every field type is present in page content.
texts = [
"Card 2",
"This is the description of Card 2.",
]
for text in texts:
self.assertTrue(text in documents[1].page_content)
# Check all metadata is present in second Card
self.assertEqual(
documents[1].metadata,
{
"title": "Card 2",
"id": "45650aca6952888df7975903",
"url": "https://trello.com/card/45650aca6952888df7975903",
"labels": ["Medium", "Task"],
"list": "In Progress",
"closed": False,
"due_date": "",
},
"Metadata of Card 2 Matches.",
)
soup = BeautifulSoup(documents[2].page_content, "html.parser")
self.assertTrue(
len(soup.find_all()) == 0,
"There is not markup in Card 2 document content.",
)
# Check samples of every field type is present in page content.
texts = [
"Camera",
"camera limit to stage size:complete",
"Use 'Impulse' Cinemachine feature for camera shake.",
]
for text in texts:
self.assertTrue(text in documents[2].page_content, text + " is present.")
# Check all metadata is present in second Card
self.assertEqual(
documents[2].metadata,
{
"title": "Camera",
"id": "55550aca6952888df7975903",
"url": "https://trello.com/card/55550aca6952888df7975903",
"labels": ["Task"],
"list": "Selected for Milestone",
"closed": False,
"due_date": "",
},
"Metadata of Camera Card matches.",
)
def test_partial_text_and_metadata(self) -> None:
"""
Test loading a board cards removing some text and metadata.
"""
trello_loader = TrelloLoader.from_credentials(
"QA",
api_key="API_KEY",
token="API_TOKEN",
extra_metadata=("list"),
include_card_name=False,
include_checklist=False,
include_comments=False,
)
documents = trello_loader.load()
# Check samples of every field type is present in page content.
texts = [
"Closed Card Title",
"Checklist 1",
"Item 1:pending",
"This is a comment on a Closed Card.",
]
for text in texts:
self.assertFalse(text in documents[0].page_content)
# Check all metadata is present in first Card
self.assertEqual(
documents[0].metadata,
{
"title": "Closed Card Title",
"id": "12350aca6952888df7975903",
"url": "https://trello.com/card/12350aca6952888df7975903",
"list": "Done",
},
"Metadata of Closed Card Matches.",
)
| true | import unittest
from collections import namedtuple
from typing import Any, Optional
from unittest.mock import patch
import pytest
from langchain.document_loaders.trello import TrelloLoader
def list_to_objects(dict_list: list) -> list:
"""Helper to convert dict objects."""
return [
namedtuple("Object", d.keys())(**d) for d in dict_list if isinstance(d, dict)
]
def card_list_to_objects(cards: list) -> list:
"""Helper to convert dict cards into trello weird mix of objects and dictionaries"""
for card in cards:
card["checklists"] = list_to_objects(card.get("checklists"))
card["labels"] = list_to_objects(card.get("labels"))
return list_to_objects(cards)
class MockBoard:
"""
Defining Trello mock board internal object to use in the patched method.
"""
def __init__(self, id: str, name: str, cards: list, lists: list):
self.id = id
self.name = name
self.cards = cards
self.lists = lists
def get_cards(self, card_filter: Optional[str] = "") -> list:
"""We do not need to test the card-filter since is on Trello Client side."""
return self.cards
def list_lists(self) -> list:
return self.lists
TRELLO_LISTS = [
{
"id": "PI:KEY:se2xy1bknelxn4y8xzxu3trosptip3q5END_PI",
"name": "Publishing Considerations",
},
{
"id": "PI:KEY:949d1u22cbffbrarjh182eig55721odjEND_PI",
"name": "Backlog",
},
{
"id": "PI:KEY:74t3tndxag9o7h0890bnpfzh4olk2h9xEND_PI",
"name": "Selected for Milestone",
},
{
"id": "555505ba95ff925f9fb1b370",
"name": "Blocked",
},
{
"id": "PI:KEY:9jnerlff23u8ed01np9g6ysbhsh0dvcsEND_PI",
"name": "In Progress",
},
{
"id": "555505bdfe380c7edc8ca1a3",
"name": "Done",
},
]
# Create a mock list of cards.
TRELLO_CARDS_QA = [
{
"id": "12350aca6952888df7975903",
"name": "Closed Card Title",
"description": "This is the <em>description</em> of Closed Card.",
"closed": True,
"labels": [],
"due_date": "",
"url": "https://trello.com/card/12350aca6952888df7975903",
"list_id": "555505bdfe380c7edc8ca1a3",
"checklists": [
{
"name": "Checklist 1",
"items": [
{
"name": "Item 1",
"state": "pending",
},
{
"name": "Item 2",
"state": "completed",
},
],
},
],
"comments": [
{
"data": {
"text": "This is a comment on a <s>Closed</s> Card.",
},
},
],
},
{
"id": "45650aca6952888df7975903",
"name": "Card 2",
"description": "This is the description of <strong>Card 2</strong>.",
"closed": False,
"labels": [{"name": "Medium"}, {"name": "Task"}],
"due_date": "",
"url": "https://trello.com/card/45650aca6952888df7975903",
"list_id": "PI:KEY:9jnerlff23u8ed01np9g6ysbhsh0dvcsEND_PI",
"checklists": [],
"comments": [],
},
{
"id": "55550aca6952888df7975903",
"name": "Camera",
"description": "<div></div>",
"closed": False,
"labels": [{"name": "Task"}],
"due_date": "",
"url": "https://trello.com/card/55550aca6952888df7975903",
"list_id": "PI:KEY:74t3tndxag9o7h0890bnpfzh4olk2h9xEND_PI",
"checklists": [
{
"name": "Tasks",
"items": [
{"name": "Zoom", "state": "complete"},
{"name": "Follow players", "state": "complete"},
{
"name": "camera limit to stage size",
"state": "complete",
},
{"name": "Post Processing effects", "state": "complete"},
{
"name": "Shitch to universal render pipeline",
"state": "complete",
},
],
},
],
"comments": [
{
"data": {
"text": (
"to follow group of players use Group Camera feature of "
"cinemachine."
)
}
},
{
"data": {
"text": "Use 'Impulse' <s>Cinemachine</s> feature for camera shake."
}
},
{"data": {"text": "depth of field with custom shader."}},
],
},
]
@pytest.fixture
def mock_trello_client() -> Any:
"""Fixture that creates a mock for trello.TrelloClient."""
# Create a mock `trello.TrelloClient` object.
with patch("trello.TrelloClient") as mock_trello_client:
# Create a mock list of trello list (columns in the UI).
# The trello client returns a hierarchy mix of objects and dictionaries.
list_objs = list_to_objects(TRELLO_LISTS)
cards_qa_objs = card_list_to_objects(TRELLO_CARDS_QA)
boards = [
MockBoard("5555eaafea917522902a2a2c", "Research", [], list_objs),
MockBoard("PI:KEY:ngw6fo1pu3tjgnp9jnlp7vnwvfqb9yn7END_PI", "QA", cards_qa_objs, list_objs),
]
# Patch `get_boards()` method of the mock `TrelloClient` object to return the
# mock list of boards.
mock_trello_client.return_value.list_boards.return_value = boards
yield mock_trello_client.return_value
@pytest.mark.usefixtures("mock_trello_client")
@pytest.mark.requires("trello", "bs4", "lxml")
class TestTrelloLoader(unittest.TestCase):
def test_empty_board(self) -> None:
"""
Test loading a board with no cards.
"""
trello_loader = TrelloLoader.from_credentials(
"Research",
api_key="API_KEY",
token="API_TOKEN",
)
documents = trello_loader.load()
self.assertEqual(len(documents), 0, "Empty board returns an empty list.")
def test_complete_text_and_metadata(self) -> None:
"""
Test loading a board cards with all metadata.
"""
from bs4 import BeautifulSoup
trello_loader = TrelloLoader.from_credentials(
"QA",
api_key="API_KEY",
token="API_TOKEN",
)
documents = trello_loader.load()
self.assertEqual(len(documents), len(TRELLO_CARDS_QA), "Card count matches.")
soup = BeautifulSoup(documents[0].page_content, "html.parser")
self.assertTrue(
len(soup.find_all()) == 0,
"There is not markup in Closed Card document content.",
)
# Check samples of every field type is present in page content.
texts = [
"Closed Card Title",
"This is the description of Closed Card.",
"Checklist 1",
"Item 1:pending",
"This is a comment on a Closed Card.",
]
for text in texts:
self.assertTrue(text in documents[0].page_content)
# Check all metadata is present in first Card
self.assertEqual(
documents[0].metadata,
{
"title": "Closed Card Title",
"id": "12350aca6952888df7975903",
"url": "https://trello.com/card/12350aca6952888df7975903",
"labels": [],
"list": "Done",
"closed": True,
"due_date": "",
},
"Metadata of Closed Card Matches.",
)
soup = BeautifulSoup(documents[1].page_content, "html.parser")
self.assertTrue(
len(soup.find_all()) == 0,
"There is not markup in Card 2 document content.",
)
# Check samples of every field type is present in page content.
texts = [
"Card 2",
"This is the description of Card 2.",
]
for text in texts:
self.assertTrue(text in documents[1].page_content)
# Check all metadata is present in second Card
self.assertEqual(
documents[1].metadata,
{
"title": "Card 2",
"id": "45650aca6952888df7975903",
"url": "https://trello.com/card/45650aca6952888df7975903",
"labels": ["Medium", "Task"],
"list": "In Progress",
"closed": False,
"due_date": "",
},
"Metadata of Card 2 Matches.",
)
soup = BeautifulSoup(documents[2].page_content, "html.parser")
self.assertTrue(
len(soup.find_all()) == 0,
"There is not markup in Card 2 document content.",
)
# Check samples of every field type is present in page content.
texts = [
"Camera",
"camera limit to stage size:complete",
"Use 'Impulse' Cinemachine feature for camera shake.",
]
for text in texts:
self.assertTrue(text in documents[2].page_content, text + " is present.")
# Check all metadata is present in second Card
self.assertEqual(
documents[2].metadata,
{
"title": "Camera",
"id": "55550aca6952888df7975903",
"url": "https://trello.com/card/55550aca6952888df7975903",
"labels": ["Task"],
"list": "Selected for Milestone",
"closed": False,
"due_date": "",
},
"Metadata of Camera Card matches.",
)
def test_partial_text_and_metadata(self) -> None:
"""
Test loading a board cards removing some text and metadata.
"""
trello_loader = TrelloLoader.from_credentials(
"QA",
api_key="API_KEY",
token="API_TOKEN",
extra_metadata=("list"),
include_card_name=False,
include_checklist=False,
include_comments=False,
)
documents = trello_loader.load()
# Check samples of every field type is present in page content.
texts = [
"Closed Card Title",
"Checklist 1",
"Item 1:pending",
"This is a comment on a Closed Card.",
]
for text in texts:
self.assertFalse(text in documents[0].page_content)
# Check all metadata is present in first Card
self.assertEqual(
documents[0].metadata,
{
"title": "Closed Card Title",
"id": "12350aca6952888df7975903",
"url": "https://trello.com/card/12350aca6952888df7975903",
"list": "Done",
},
"Metadata of Closed Card Matches.",
)
|
hf_public_repos/radi-cho | hf_public_repos/radi-cho/datasetGPT/setup.py | from distutils.core import setup
from setuptools import find_packages
with open("README.md", "r", encoding = "utf-8") as readme:
long_description = readme.read()
setup(
name="datasetGPT",
version="0.0.4",
description="Generate textual and conversational datasets with LLMs.",
long_description = long_description,
long_description_content_type = "text/markdown",
author="Radostin Cholakov",
author_email="radicho123@gmail.com",
url="https://github.com/radi-cho/datasetGPT",
# download_url="https://github.com/radi-cho/datasetGPT/archive/v0.0.1.tar.gz",
keywords=["dataset", "llm", "langchain", "openai"],
package_dir={"": "src"},
packages = find_packages(where="src"),
install_requires=[
"langchain>=0.0.113",
"click>=8.1"
],
entry_points={
"console_scripts": [
"datasetGPT=datasetGPT:datasetGPT"
],
},
)
| 0 | 90 | [{"tag": "EMAIL", "value": "radicho123@gmail.com", "start": 438, "end": 458}] | true | 1 | from distutils.core import setup
from setuptools import find_packages
with open("README.md", "r", encoding = "utf-8") as readme:
long_description = readme.read()
setup(
name="datasetGPT",
version="0.0.4",
description="Generate textual and conversational datasets with LLMs.",
long_description = long_description,
long_description_content_type = "text/markdown",
author="Radostin Cholakov",
author_email="upchh@example.com",
url="https://github.com/radi-cho/datasetGPT",
# download_url="https://github.com/radi-cho/datasetGPT/archive/v0.0.1.tar.gz",
keywords=["dataset", "llm", "langchain", "openai"],
package_dir={"": "src"},
packages = find_packages(where="src"),
install_requires=[
"langchain>=0.0.113",
"click>=8.1"
],
entry_points={
"console_scripts": [
"datasetGPT=datasetGPT:datasetGPT"
],
},
)
| true | from distutils.core import setup
from setuptools import find_packages
with open("README.md", "r", encoding = "utf-8") as readme:
long_description = readme.read()
setup(
name="datasetGPT",
version="0.0.4",
description="Generate textual and conversational datasets with LLMs.",
long_description = long_description,
long_description_content_type = "text/markdown",
author="Radostin Cholakov",
author_email="PI:EMAIL:upchh@example.comEND_PI",
url="https://github.com/radi-cho/datasetGPT",
# download_url="https://github.com/radi-cho/datasetGPT/archive/v0.0.1.tar.gz",
keywords=["dataset", "llm", "langchain", "openai"],
package_dir={"": "src"},
packages = find_packages(where="src"),
install_requires=[
"langchain>=0.0.113",
"click>=8.1"
],
entry_points={
"console_scripts": [
"datasetGPT=datasetGPT:datasetGPT"
],
},
)
|
hf_public_repos/langchain-ai/langchain/docs/docs/integrations | hf_public_repos/langchain-ai/langchain/docs/docs/integrations/chat_loaders/gmail.ipynb | import os.path
import base64
import json
import re
import time
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
import logging
import requests
SCOPES = ["https://www.googleapis.com/auth/gmail.readonly"]
creds = None
# The file token.json stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists("email_token.json"):
creds = Credentials.from_authorized_user_file("email_token.json", SCOPES)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
# your creds file here. Please create json file as here https://cloud.google.com/docs/authentication/getting-started
"creds.json",
SCOPES,
)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open("email_token.json", "w") as token:
token.write(creds.to_json())from langchain.chat_loaders.gmail import GMailLoaderloader = GMailLoader(creds=creds, n=3)data = loader.load()# Sometimes there can be errors which we silently ignore
len(data)from langchain.chat_loaders.utils import (
map_ai_messages,
)# This makes messages sent by hchase@langchain.com the AI Messages
# This means you will train an LLM to predict as if it's responding as hchase
training_data = list(
map_ai_messages(data, sender="Harrison Chase <hchase@langchain.com>")
) | 0 | 3,673 | [{"tag": "EMAIL", "value": "hchase@langchain.com", "start": 1536, "end": 1556}, {"tag": "EMAIL", "value": "hchase@langchain.com", "start": 1723, "end": 1743}] | true | 2 | import os.path
import base64
import json
import re
import time
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
import logging
import requests
SCOPES = ["https://www.googleapis.com/auth/gmail.readonly"]
creds = None
# The file token.json stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists("email_token.json"):
creds = Credentials.from_authorized_user_file("email_token.json", SCOPES)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
# your creds file here. Please create json file as here https://cloud.google.com/docs/authentication/getting-started
"creds.json",
SCOPES,
)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open("email_token.json", "w") as token:
token.write(creds.to_json())from langchain.chat_loaders.gmail import GMailLoaderloader = GMailLoader(creds=creds, n=3)data = loader.load()# Sometimes there can be errors which we silently ignore
len(data)from langchain.chat_loaders.utils import (
map_ai_messages,
)# This makes messages sent by ychag@example.com the AI Messages
# This means you will train an LLM to predict as if it's responding as hchase
training_data = list(
map_ai_messages(data, sender="Harrison Chase <ychag@example.com>")
) | true | import os.path
import base64
import json
import re
import time
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
import logging
import requests
SCOPES = ["https://www.googleapis.com/auth/gmail.readonly"]
creds = None
# The file token.json stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists("email_token.json"):
creds = Credentials.from_authorized_user_file("email_token.json", SCOPES)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
# your creds file here. Please create json file as here https://cloud.google.com/docs/authentication/getting-started
"creds.json",
SCOPES,
)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open("email_token.json", "w") as token:
token.write(creds.to_json())from langchain.chat_loaders.gmail import GMailLoaderloader = GMailLoader(creds=creds, n=3)data = loader.load()# Sometimes there can be errors which we silently ignore
len(data)from langchain.chat_loaders.utils import (
map_ai_messages,
)# This makes messages sent by PI:EMAIL:ychag@example.comEND_PI the AI Messages
# This means you will train an LLM to predict as if it's responding as hchase
training_data = list(
map_ai_messages(data, sender="Harrison Chase <PI:EMAIL:ychag@example.comEND_PI>")
) |
hf_public_repos/langchain-ai/langchain/libs/langchain/langchain/chains | hf_public_repos/langchain-ai/langchain/libs/langchain/langchain/chains/graph_qa/prompts.py | # flake8: noqa
from langchain.prompts.prompt import PromptTemplate
_DEFAULT_ENTITY_EXTRACTION_TEMPLATE = """Extract all entities from the following text. As a guideline, a proper noun is generally capitalized. You should definitely extract all names and places.
Return the output as a single comma-separated list, or NONE if there is nothing of note to return.
EXAMPLE
i'm trying to improve Langchain's interfaces, the UX, its integrations with various products the user might want ... a lot of stuff.
Output: Langchain
END OF EXAMPLE
EXAMPLE
i'm trying to improve Langchain's interfaces, the UX, its integrations with various products the user might want ... a lot of stuff. I'm working with Sam.
Output: Langchain, Sam
END OF EXAMPLE
Begin!
{input}
Output:"""
ENTITY_EXTRACTION_PROMPT = PromptTemplate(
input_variables=["input"], template=_DEFAULT_ENTITY_EXTRACTION_TEMPLATE
)
_DEFAULT_GRAPH_QA_TEMPLATE = """Use the following knowledge triplets to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}
Question: {question}
Helpful Answer:"""
GRAPH_QA_PROMPT = PromptTemplate(
template=_DEFAULT_GRAPH_QA_TEMPLATE, input_variables=["context", "question"]
)
CYPHER_GENERATION_TEMPLATE = """Task:Generate Cypher statement to query a graph database.
Instructions:
Use only the provided relationship types and properties in the schema.
Do not use any other relationship types or properties that are not provided.
Schema:
{schema}
Note: Do not include any explanations or apologies in your responses.
Do not respond to any questions that might ask anything else than for you to construct a Cypher statement.
Do not include any text except the generated Cypher statement.
The question is:
{question}"""
CYPHER_GENERATION_PROMPT = PromptTemplate(
input_variables=["schema", "question"], template=CYPHER_GENERATION_TEMPLATE
)
NEBULAGRAPH_EXTRA_INSTRUCTIONS = """
Instructions:
First, generate cypher then convert it to NebulaGraph Cypher dialect(rather than standard):
1. it requires explicit label specification only when referring to node properties: v.`Foo`.name
2. note explicit label specification is not needed for edge properties, so it's e.name instead of e.`Bar`.name
3. it uses double equals sign for comparison: `==` rather than `=`
For instance:
```diff
< MATCH (p:person)-[e:directed]->(m:movie) WHERE m.name = 'The Godfather II'
< RETURN p.name, e.year, m.name;
---
> MATCH (p:`person`)-[e:directed]->(m:`movie`) WHERE m.`movie`.`name` == 'The Godfather II'
> RETURN p.`person`.`name`, e.year, m.`movie`.`name`;
```\n"""
NGQL_GENERATION_TEMPLATE = CYPHER_GENERATION_TEMPLATE.replace(
"Generate Cypher", "Generate NebulaGraph Cypher"
).replace("Instructions:", NEBULAGRAPH_EXTRA_INSTRUCTIONS)
NGQL_GENERATION_PROMPT = PromptTemplate(
input_variables=["schema", "question"], template=NGQL_GENERATION_TEMPLATE
)
KUZU_EXTRA_INSTRUCTIONS = """
Instructions:
Generate statement with Kùzu Cypher dialect (rather than standard):
1. do not use `WHERE EXISTS` clause to check the existence of a property because Kùzu database has a fixed schema.
2. do not omit relationship pattern. Always use `()-[]->()` instead of `()->()`.
3. do not include any notes or comments even if the statement does not produce the expected result.
```\n"""
KUZU_GENERATION_TEMPLATE = CYPHER_GENERATION_TEMPLATE.replace(
"Generate Cypher", "Generate Kùzu Cypher"
).replace("Instructions:", KUZU_EXTRA_INSTRUCTIONS)
KUZU_GENERATION_PROMPT = PromptTemplate(
input_variables=["schema", "question"], template=KUZU_GENERATION_TEMPLATE
)
GREMLIN_GENERATION_TEMPLATE = CYPHER_GENERATION_TEMPLATE.replace("Cypher", "Gremlin")
GREMLIN_GENERATION_PROMPT = PromptTemplate(
input_variables=["schema", "question"], template=GREMLIN_GENERATION_TEMPLATE
)
CYPHER_QA_TEMPLATE = """You are an assistant that helps to form nice and human understandable answers.
The information part contains the provided information that you must use to construct an answer.
The provided information is authoritative, you must never doubt it or try to use your internal knowledge to correct it.
Make the answer sound as a response to the question. Do not mention that you based the result on the given information.
If the provided information is empty, say that you don't know the answer.
Information:
{context}
Question: {question}
Helpful Answer:"""
CYPHER_QA_PROMPT = PromptTemplate(
input_variables=["context", "question"], template=CYPHER_QA_TEMPLATE
)
SPARQL_INTENT_TEMPLATE = """Task: Identify the intent of a prompt and return the appropriate SPARQL query type.
You are an assistant that distinguishes different types of prompts and returns the corresponding SPARQL query types.
Consider only the following query types:
* SELECT: this query type corresponds to questions
* UPDATE: this query type corresponds to all requests for deleting, inserting, or changing triples
Note: Be as concise as possible.
Do not include any explanations or apologies in your responses.
Do not respond to any questions that ask for anything else than for you to identify a SPARQL query type.
Do not include any unnecessary whitespaces or any text except the query type, i.e., either return 'SELECT' or 'UPDATE'.
The prompt is:
{prompt}
Helpful Answer:"""
SPARQL_INTENT_PROMPT = PromptTemplate(
input_variables=["prompt"], template=SPARQL_INTENT_TEMPLATE
)
SPARQL_GENERATION_SELECT_TEMPLATE = """Task: Generate a SPARQL SELECT statement for querying a graph database.
For instance, to find all email addresses of John Doe, the following query in backticks would be suitable:
```
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?email
WHERE {{
?person foaf:name "John Doe" .
?person foaf:mbox ?email .
}}
```
Instructions:
Use only the node types and properties provided in the schema.
Do not use any node types and properties that are not explicitly provided.
Include all necessary prefixes.
Schema:
{schema}
Note: Be as concise as possible.
Do not include any explanations or apologies in your responses.
Do not respond to any questions that ask for anything else than for you to construct a SPARQL query.
Do not include any text except the SPARQL query generated.
The question is:
{prompt}"""
SPARQL_GENERATION_SELECT_PROMPT = PromptTemplate(
input_variables=["schema", "prompt"], template=SPARQL_GENERATION_SELECT_TEMPLATE
)
SPARQL_GENERATION_UPDATE_TEMPLATE = """Task: Generate a SPARQL UPDATE statement for updating a graph database.
For instance, to add 'jane.doe@foo.bar' as a new email address for Jane Doe, the following query in backticks would be suitable:
```
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
INSERT {{
?person foaf:mbox <mailto:jane.doe@foo.bar> .
}}
WHERE {{
?person foaf:name "Jane Doe" .
}}
```
Instructions:
Make the query as short as possible and avoid adding unnecessary triples.
Use only the node types and properties provided in the schema.
Do not use any node types and properties that are not explicitly provided.
Include all necessary prefixes.
Schema:
{schema}
Note: Be as concise as possible.
Do not include any explanations or apologies in your responses.
Do not respond to any questions that ask for anything else than for you to construct a SPARQL query.
Return only the generated SPARQL query, nothing else.
The information to be inserted is:
{prompt}"""
SPARQL_GENERATION_UPDATE_PROMPT = PromptTemplate(
input_variables=["schema", "prompt"], template=SPARQL_GENERATION_UPDATE_TEMPLATE
)
SPARQL_QA_TEMPLATE = """Task: Generate a natural language response from the results of a SPARQL query.
You are an assistant that creates well-written and human understandable answers.
The information part contains the information provided, which you can use to construct an answer.
The information provided is authoritative, you must never doubt it or try to use your internal knowledge to correct it.
Make your response sound like the information is coming from an AI assistant, but don't add any information.
Information:
{context}
Question: {prompt}
Helpful Answer:"""
SPARQL_QA_PROMPT = PromptTemplate(
input_variables=["context", "prompt"], template=SPARQL_QA_TEMPLATE
)
AQL_GENERATION_TEMPLATE = """Task: Generate an ArangoDB Query Language (AQL) query from a User Input.
You are an ArangoDB Query Language (AQL) expert responsible for translating a `User Input` into an ArangoDB Query Language (AQL) query.
You are given an `ArangoDB Schema`. It is a JSON Object containing:
1. `Graph Schema`: Lists all Graphs within the ArangoDB Database Instance, along with their Edge Relationships.
2. `Collection Schema`: Lists all Collections within the ArangoDB Database Instance, along with their document/edge properties and a document/edge example.
You may also be given a set of `AQL Query Examples` to help you create the `AQL Query`. If provided, the `AQL Query Examples` should be used as a reference, similar to how `ArangoDB Schema` should be used.
Things you should do:
- Think step by step.
- Rely on `ArangoDB Schema` and `AQL Query Examples` (if provided) to generate the query.
- Begin the `AQL Query` by the `WITH` AQL keyword to specify all of the ArangoDB Collections required.
- Return the `AQL Query` wrapped in 3 backticks (```).
- Use only the provided relationship types and properties in the `ArangoDB Schema` and any `AQL Query Examples` queries.
- Only answer to requests related to generating an AQL Query.
- If a request is unrelated to generating AQL Query, say that you cannot help the user.
Things you should not do:
- Do not use any properties/relationships that can't be inferred from the `ArangoDB Schema` or the `AQL Query Examples`.
- Do not include any text except the generated AQL Query.
- Do not provide explanations or apologies in your responses.
- Do not generate an AQL Query that removes or deletes any data.
Under no circumstance should you generate an AQL Query that deletes any data whatsoever.
ArangoDB Schema:
{adb_schema}
AQL Query Examples (Optional):
{aql_examples}
User Input:
{user_input}
AQL Query:
"""
AQL_GENERATION_PROMPT = PromptTemplate(
input_variables=["adb_schema", "aql_examples", "user_input"],
template=AQL_GENERATION_TEMPLATE,
)
AQL_FIX_TEMPLATE = """Task: Address the ArangoDB Query Language (AQL) error message of an ArangoDB Query Language query.
You are an ArangoDB Query Language (AQL) expert responsible for correcting the provided `AQL Query` based on the provided `AQL Error`.
The `AQL Error` explains why the `AQL Query` could not be executed in the database.
The `AQL Error` may also contain the position of the error relative to the total number of lines of the `AQL Query`.
For example, 'error X at position 2:5' denotes that the error X occurs on line 2, column 5 of the `AQL Query`.
You are also given the `ArangoDB Schema`. It is a JSON Object containing:
1. `Graph Schema`: Lists all Graphs within the ArangoDB Database Instance, along with their Edge Relationships.
2. `Collection Schema`: Lists all Collections within the ArangoDB Database Instance, along with their document/edge properties and a document/edge example.
You will output the `Corrected AQL Query` wrapped in 3 backticks (```). Do not include any text except the Corrected AQL Query.
Remember to think step by step.
ArangoDB Schema:
{adb_schema}
AQL Query:
{aql_query}
AQL Error:
{aql_error}
Corrected AQL Query:
"""
AQL_FIX_PROMPT = PromptTemplate(
input_variables=[
"adb_schema",
"aql_query",
"aql_error",
],
template=AQL_FIX_TEMPLATE,
)
AQL_QA_TEMPLATE = """Task: Generate a natural language `Summary` from the results of an ArangoDB Query Language query.
You are an ArangoDB Query Language (AQL) expert responsible for creating a well-written `Summary` from the `User Input` and associated `AQL Result`.
A user has executed an ArangoDB Query Language query, which has returned the AQL Result in JSON format.
You are responsible for creating an `Summary` based on the AQL Result.
You are given the following information:
- `ArangoDB Schema`: contains a schema representation of the user's ArangoDB Database.
- `User Input`: the original question/request of the user, which has been translated into an AQL Query.
- `AQL Query`: the AQL equivalent of the `User Input`, translated by another AI Model. Should you deem it to be incorrect, suggest a different AQL Query.
- `AQL Result`: the JSON output returned by executing the `AQL Query` within the ArangoDB Database.
Remember to think step by step.
Your `Summary` should sound like it is a response to the `User Input`.
Your `Summary` should not include any mention of the `AQL Query` or the `AQL Result`.
ArangoDB Schema:
{adb_schema}
User Input:
{user_input}
AQL Query:
{aql_query}
AQL Result:
{aql_result}
"""
AQL_QA_PROMPT = PromptTemplate(
input_variables=["adb_schema", "user_input", "aql_query", "aql_result"],
template=AQL_QA_TEMPLATE,
)
NEPTUNE_OPENCYPHER_EXTRA_INSTRUCTIONS = """
Instructions:
Generate the query in openCypher format and follow these rules:
Do not use `NONE`, `ALL` or `ANY` predicate functions, rather use list comprehensions.
Do not use `REDUCE` function. Rather use a combination of list comprehension and the `UNWIND` clause to achieve similar results.
Do not use `FOREACH` clause. Rather use a combination of `WITH` and `UNWIND` clauses to achieve similar results.
\n"""
NEPTUNE_OPENCYPHER_GENERATION_TEMPLATE = CYPHER_GENERATION_TEMPLATE.replace(
"Instructions:", NEPTUNE_OPENCYPHER_EXTRA_INSTRUCTIONS
)
NEPTUNE_OPENCYPHER_GENERATION_PROMPT = PromptTemplate(
input_variables=["schema", "question"],
template=NEPTUNE_OPENCYPHER_GENERATION_TEMPLATE,
)
NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_TEMPLATE = """
Write an openCypher query to answer the following question. Do not explain the answer. Only return the query.
Question: "{question}".
Here is the property graph schema:
{schema}
\n"""
NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_PROMPT = PromptTemplate(
input_variables=["schema", "question"],
template=NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_TEMPLATE,
)
| 0 | 2,326 | [{"tag": "EMAIL", "value": "jane.doe@foo.bar", "start": 6550, "end": 6566}, {"tag": "EMAIL", "value": "jane.doe@foo.bar", "start": 6743, "end": 6759}] | true | 2 | # flake8: noqa
from langchain.prompts.prompt import PromptTemplate
_DEFAULT_ENTITY_EXTRACTION_TEMPLATE = """Extract all entities from the following text. As a guideline, a proper noun is generally capitalized. You should definitely extract all names and places.
Return the output as a single comma-separated list, or NONE if there is nothing of note to return.
EXAMPLE
i'm trying to improve Langchain's interfaces, the UX, its integrations with various products the user might want ... a lot of stuff.
Output: Langchain
END OF EXAMPLE
EXAMPLE
i'm trying to improve Langchain's interfaces, the UX, its integrations with various products the user might want ... a lot of stuff. I'm working with Sam.
Output: Langchain, Sam
END OF EXAMPLE
Begin!
{input}
Output:"""
ENTITY_EXTRACTION_PROMPT = PromptTemplate(
input_variables=["input"], template=_DEFAULT_ENTITY_EXTRACTION_TEMPLATE
)
_DEFAULT_GRAPH_QA_TEMPLATE = """Use the following knowledge triplets to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}
Question: {question}
Helpful Answer:"""
GRAPH_QA_PROMPT = PromptTemplate(
template=_DEFAULT_GRAPH_QA_TEMPLATE, input_variables=["context", "question"]
)
CYPHER_GENERATION_TEMPLATE = """Task:Generate Cypher statement to query a graph database.
Instructions:
Use only the provided relationship types and properties in the schema.
Do not use any other relationship types or properties that are not provided.
Schema:
{schema}
Note: Do not include any explanations or apologies in your responses.
Do not respond to any questions that might ask anything else than for you to construct a Cypher statement.
Do not include any text except the generated Cypher statement.
The question is:
{question}"""
CYPHER_GENERATION_PROMPT = PromptTemplate(
input_variables=["schema", "question"], template=CYPHER_GENERATION_TEMPLATE
)
NEBULAGRAPH_EXTRA_INSTRUCTIONS = """
Instructions:
First, generate cypher then convert it to NebulaGraph Cypher dialect(rather than standard):
1. it requires explicit label specification only when referring to node properties: v.`Foo`.name
2. note explicit label specification is not needed for edge properties, so it's e.name instead of e.`Bar`.name
3. it uses double equals sign for comparison: `==` rather than `=`
For instance:
```diff
< MATCH (p:person)-[e:directed]->(m:movie) WHERE m.name = 'The Godfather II'
< RETURN p.name, e.year, m.name;
---
> MATCH (p:`person`)-[e:directed]->(m:`movie`) WHERE m.`movie`.`name` == 'The Godfather II'
> RETURN p.`person`.`name`, e.year, m.`movie`.`name`;
```\n"""
NGQL_GENERATION_TEMPLATE = CYPHER_GENERATION_TEMPLATE.replace(
"Generate Cypher", "Generate NebulaGraph Cypher"
).replace("Instructions:", NEBULAGRAPH_EXTRA_INSTRUCTIONS)
NGQL_GENERATION_PROMPT = PromptTemplate(
input_variables=["schema", "question"], template=NGQL_GENERATION_TEMPLATE
)
KUZU_EXTRA_INSTRUCTIONS = """
Instructions:
Generate statement with Kùzu Cypher dialect (rather than standard):
1. do not use `WHERE EXISTS` clause to check the existence of a property because Kùzu database has a fixed schema.
2. do not omit relationship pattern. Always use `()-[]->()` instead of `()->()`.
3. do not include any notes or comments even if the statement does not produce the expected result.
```\n"""
KUZU_GENERATION_TEMPLATE = CYPHER_GENERATION_TEMPLATE.replace(
"Generate Cypher", "Generate Kùzu Cypher"
).replace("Instructions:", KUZU_EXTRA_INSTRUCTIONS)
KUZU_GENERATION_PROMPT = PromptTemplate(
input_variables=["schema", "question"], template=KUZU_GENERATION_TEMPLATE
)
GREMLIN_GENERATION_TEMPLATE = CYPHER_GENERATION_TEMPLATE.replace("Cypher", "Gremlin")
GREMLIN_GENERATION_PROMPT = PromptTemplate(
input_variables=["schema", "question"], template=GREMLIN_GENERATION_TEMPLATE
)
CYPHER_QA_TEMPLATE = """You are an assistant that helps to form nice and human understandable answers.
The information part contains the provided information that you must use to construct an answer.
The provided information is authoritative, you must never doubt it or try to use your internal knowledge to correct it.
Make the answer sound as a response to the question. Do not mention that you based the result on the given information.
If the provided information is empty, say that you don't know the answer.
Information:
{context}
Question: {question}
Helpful Answer:"""
CYPHER_QA_PROMPT = PromptTemplate(
input_variables=["context", "question"], template=CYPHER_QA_TEMPLATE
)
SPARQL_INTENT_TEMPLATE = """Task: Identify the intent of a prompt and return the appropriate SPARQL query type.
You are an assistant that distinguishes different types of prompts and returns the corresponding SPARQL query types.
Consider only the following query types:
* SELECT: this query type corresponds to questions
* UPDATE: this query type corresponds to all requests for deleting, inserting, or changing triples
Note: Be as concise as possible.
Do not include any explanations or apologies in your responses.
Do not respond to any questions that ask for anything else than for you to identify a SPARQL query type.
Do not include any unnecessary whitespaces or any text except the query type, i.e., either return 'SELECT' or 'UPDATE'.
The prompt is:
{prompt}
Helpful Answer:"""
SPARQL_INTENT_PROMPT = PromptTemplate(
input_variables=["prompt"], template=SPARQL_INTENT_TEMPLATE
)
SPARQL_GENERATION_SELECT_TEMPLATE = """Task: Generate a SPARQL SELECT statement for querying a graph database.
For instance, to find all email addresses of John Doe, the following query in backticks would be suitable:
```
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?email
WHERE {{
?person foaf:name "John Doe" .
?person foaf:mbox ?email .
}}
```
Instructions:
Use only the node types and properties provided in the schema.
Do not use any node types and properties that are not explicitly provided.
Include all necessary prefixes.
Schema:
{schema}
Note: Be as concise as possible.
Do not include any explanations or apologies in your responses.
Do not respond to any questions that ask for anything else than for you to construct a SPARQL query.
Do not include any text except the SPARQL query generated.
The question is:
{prompt}"""
SPARQL_GENERATION_SELECT_PROMPT = PromptTemplate(
input_variables=["schema", "prompt"], template=SPARQL_GENERATION_SELECT_TEMPLATE
)
SPARQL_GENERATION_UPDATE_TEMPLATE = """Task: Generate a SPARQL UPDATE statement for updating a graph database.
For instance, to add 'efpyi@example.com' as a new email address for Jane Doe, the following query in backticks would be suitable:
```
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
INSERT {{
?person foaf:mbox <mailto:efpyi@example.com> .
}}
WHERE {{
?person foaf:name "Jane Doe" .
}}
```
Instructions:
Make the query as short as possible and avoid adding unnecessary triples.
Use only the node types and properties provided in the schema.
Do not use any node types and properties that are not explicitly provided.
Include all necessary prefixes.
Schema:
{schema}
Note: Be as concise as possible.
Do not include any explanations or apologies in your responses.
Do not respond to any questions that ask for anything else than for you to construct a SPARQL query.
Return only the generated SPARQL query, nothing else.
The information to be inserted is:
{prompt}"""
SPARQL_GENERATION_UPDATE_PROMPT = PromptTemplate(
input_variables=["schema", "prompt"], template=SPARQL_GENERATION_UPDATE_TEMPLATE
)
SPARQL_QA_TEMPLATE = """Task: Generate a natural language response from the results of a SPARQL query.
You are an assistant that creates well-written and human understandable answers.
The information part contains the information provided, which you can use to construct an answer.
The information provided is authoritative, you must never doubt it or try to use your internal knowledge to correct it.
Make your response sound like the information is coming from an AI assistant, but don't add any information.
Information:
{context}
Question: {prompt}
Helpful Answer:"""
SPARQL_QA_PROMPT = PromptTemplate(
input_variables=["context", "prompt"], template=SPARQL_QA_TEMPLATE
)
AQL_GENERATION_TEMPLATE = """Task: Generate an ArangoDB Query Language (AQL) query from a User Input.
You are an ArangoDB Query Language (AQL) expert responsible for translating a `User Input` into an ArangoDB Query Language (AQL) query.
You are given an `ArangoDB Schema`. It is a JSON Object containing:
1. `Graph Schema`: Lists all Graphs within the ArangoDB Database Instance, along with their Edge Relationships.
2. `Collection Schema`: Lists all Collections within the ArangoDB Database Instance, along with their document/edge properties and a document/edge example.
You may also be given a set of `AQL Query Examples` to help you create the `AQL Query`. If provided, the `AQL Query Examples` should be used as a reference, similar to how `ArangoDB Schema` should be used.
Things you should do:
- Think step by step.
- Rely on `ArangoDB Schema` and `AQL Query Examples` (if provided) to generate the query.
- Begin the `AQL Query` by the `WITH` AQL keyword to specify all of the ArangoDB Collections required.
- Return the `AQL Query` wrapped in 3 backticks (```).
- Use only the provided relationship types and properties in the `ArangoDB Schema` and any `AQL Query Examples` queries.
- Only answer to requests related to generating an AQL Query.
- If a request is unrelated to generating AQL Query, say that you cannot help the user.
Things you should not do:
- Do not use any properties/relationships that can't be inferred from the `ArangoDB Schema` or the `AQL Query Examples`.
- Do not include any text except the generated AQL Query.
- Do not provide explanations or apologies in your responses.
- Do not generate an AQL Query that removes or deletes any data.
Under no circumstance should you generate an AQL Query that deletes any data whatsoever.
ArangoDB Schema:
{adb_schema}
AQL Query Examples (Optional):
{aql_examples}
User Input:
{user_input}
AQL Query:
"""
AQL_GENERATION_PROMPT = PromptTemplate(
input_variables=["adb_schema", "aql_examples", "user_input"],
template=AQL_GENERATION_TEMPLATE,
)
AQL_FIX_TEMPLATE = """Task: Address the ArangoDB Query Language (AQL) error message of an ArangoDB Query Language query.
You are an ArangoDB Query Language (AQL) expert responsible for correcting the provided `AQL Query` based on the provided `AQL Error`.
The `AQL Error` explains why the `AQL Query` could not be executed in the database.
The `AQL Error` may also contain the position of the error relative to the total number of lines of the `AQL Query`.
For example, 'error X at position 2:5' denotes that the error X occurs on line 2, column 5 of the `AQL Query`.
You are also given the `ArangoDB Schema`. It is a JSON Object containing:
1. `Graph Schema`: Lists all Graphs within the ArangoDB Database Instance, along with their Edge Relationships.
2. `Collection Schema`: Lists all Collections within the ArangoDB Database Instance, along with their document/edge properties and a document/edge example.
You will output the `Corrected AQL Query` wrapped in 3 backticks (```). Do not include any text except the Corrected AQL Query.
Remember to think step by step.
ArangoDB Schema:
{adb_schema}
AQL Query:
{aql_query}
AQL Error:
{aql_error}
Corrected AQL Query:
"""
AQL_FIX_PROMPT = PromptTemplate(
input_variables=[
"adb_schema",
"aql_query",
"aql_error",
],
template=AQL_FIX_TEMPLATE,
)
AQL_QA_TEMPLATE = """Task: Generate a natural language `Summary` from the results of an ArangoDB Query Language query.
You are an ArangoDB Query Language (AQL) expert responsible for creating a well-written `Summary` from the `User Input` and associated `AQL Result`.
A user has executed an ArangoDB Query Language query, which has returned the AQL Result in JSON format.
You are responsible for creating an `Summary` based on the AQL Result.
You are given the following information:
- `ArangoDB Schema`: contains a schema representation of the user's ArangoDB Database.
- `User Input`: the original question/request of the user, which has been translated into an AQL Query.
- `AQL Query`: the AQL equivalent of the `User Input`, translated by another AI Model. Should you deem it to be incorrect, suggest a different AQL Query.
- `AQL Result`: the JSON output returned by executing the `AQL Query` within the ArangoDB Database.
Remember to think step by step.
Your `Summary` should sound like it is a response to the `User Input`.
Your `Summary` should not include any mention of the `AQL Query` or the `AQL Result`.
ArangoDB Schema:
{adb_schema}
User Input:
{user_input}
AQL Query:
{aql_query}
AQL Result:
{aql_result}
"""
AQL_QA_PROMPT = PromptTemplate(
input_variables=["adb_schema", "user_input", "aql_query", "aql_result"],
template=AQL_QA_TEMPLATE,
)
NEPTUNE_OPENCYPHER_EXTRA_INSTRUCTIONS = """
Instructions:
Generate the query in openCypher format and follow these rules:
Do not use `NONE`, `ALL` or `ANY` predicate functions, rather use list comprehensions.
Do not use `REDUCE` function. Rather use a combination of list comprehension and the `UNWIND` clause to achieve similar results.
Do not use `FOREACH` clause. Rather use a combination of `WITH` and `UNWIND` clauses to achieve similar results.
\n"""
NEPTUNE_OPENCYPHER_GENERATION_TEMPLATE = CYPHER_GENERATION_TEMPLATE.replace(
"Instructions:", NEPTUNE_OPENCYPHER_EXTRA_INSTRUCTIONS
)
NEPTUNE_OPENCYPHER_GENERATION_PROMPT = PromptTemplate(
input_variables=["schema", "question"],
template=NEPTUNE_OPENCYPHER_GENERATION_TEMPLATE,
)
NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_TEMPLATE = """
Write an openCypher query to answer the following question. Do not explain the answer. Only return the query.
Question: "{question}".
Here is the property graph schema:
{schema}
\n"""
NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_PROMPT = PromptTemplate(
input_variables=["schema", "question"],
template=NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_TEMPLATE,
)
| true | # flake8: noqa
from langchain.prompts.prompt import PromptTemplate
_DEFAULT_ENTITY_EXTRACTION_TEMPLATE = """Extract all entities from the following text. As a guideline, a proper noun is generally capitalized. You should definitely extract all names and places.
Return the output as a single comma-separated list, or NONE if there is nothing of note to return.
EXAMPLE
i'm trying to improve Langchain's interfaces, the UX, its integrations with various products the user might want ... a lot of stuff.
Output: Langchain
END OF EXAMPLE
EXAMPLE
i'm trying to improve Langchain's interfaces, the UX, its integrations with various products the user might want ... a lot of stuff. I'm working with Sam.
Output: Langchain, Sam
END OF EXAMPLE
Begin!
{input}
Output:"""
ENTITY_EXTRACTION_PROMPT = PromptTemplate(
input_variables=["input"], template=_DEFAULT_ENTITY_EXTRACTION_TEMPLATE
)
_DEFAULT_GRAPH_QA_TEMPLATE = """Use the following knowledge triplets to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}
Question: {question}
Helpful Answer:"""
GRAPH_QA_PROMPT = PromptTemplate(
template=_DEFAULT_GRAPH_QA_TEMPLATE, input_variables=["context", "question"]
)
CYPHER_GENERATION_TEMPLATE = """Task:Generate Cypher statement to query a graph database.
Instructions:
Use only the provided relationship types and properties in the schema.
Do not use any other relationship types or properties that are not provided.
Schema:
{schema}
Note: Do not include any explanations or apologies in your responses.
Do not respond to any questions that might ask anything else than for you to construct a Cypher statement.
Do not include any text except the generated Cypher statement.
The question is:
{question}"""
CYPHER_GENERATION_PROMPT = PromptTemplate(
input_variables=["schema", "question"], template=CYPHER_GENERATION_TEMPLATE
)
NEBULAGRAPH_EXTRA_INSTRUCTIONS = """
Instructions:
First, generate cypher then convert it to NebulaGraph Cypher dialect(rather than standard):
1. it requires explicit label specification only when referring to node properties: v.`Foo`.name
2. note explicit label specification is not needed for edge properties, so it's e.name instead of e.`Bar`.name
3. it uses double equals sign for comparison: `==` rather than `=`
For instance:
```diff
< MATCH (p:person)-[e:directed]->(m:movie) WHERE m.name = 'The Godfather II'
< RETURN p.name, e.year, m.name;
---
> MATCH (p:`person`)-[e:directed]->(m:`movie`) WHERE m.`movie`.`name` == 'The Godfather II'
> RETURN p.`person`.`name`, e.year, m.`movie`.`name`;
```\n"""
NGQL_GENERATION_TEMPLATE = CYPHER_GENERATION_TEMPLATE.replace(
"Generate Cypher", "Generate NebulaGraph Cypher"
).replace("Instructions:", NEBULAGRAPH_EXTRA_INSTRUCTIONS)
NGQL_GENERATION_PROMPT = PromptTemplate(
input_variables=["schema", "question"], template=NGQL_GENERATION_TEMPLATE
)
KUZU_EXTRA_INSTRUCTIONS = """
Instructions:
Generate statement with Kùzu Cypher dialect (rather than standard):
1. do not use `WHERE EXISTS` clause to check the existence of a property because Kùzu database has a fixed schema.
2. do not omit relationship pattern. Always use `()-[]->()` instead of `()->()`.
3. do not include any notes or comments even if the statement does not produce the expected result.
```\n"""
KUZU_GENERATION_TEMPLATE = CYPHER_GENERATION_TEMPLATE.replace(
"Generate Cypher", "Generate Kùzu Cypher"
).replace("Instructions:", KUZU_EXTRA_INSTRUCTIONS)
KUZU_GENERATION_PROMPT = PromptTemplate(
input_variables=["schema", "question"], template=KUZU_GENERATION_TEMPLATE
)
GREMLIN_GENERATION_TEMPLATE = CYPHER_GENERATION_TEMPLATE.replace("Cypher", "Gremlin")
GREMLIN_GENERATION_PROMPT = PromptTemplate(
input_variables=["schema", "question"], template=GREMLIN_GENERATION_TEMPLATE
)
CYPHER_QA_TEMPLATE = """You are an assistant that helps to form nice and human understandable answers.
The information part contains the provided information that you must use to construct an answer.
The provided information is authoritative, you must never doubt it or try to use your internal knowledge to correct it.
Make the answer sound as a response to the question. Do not mention that you based the result on the given information.
If the provided information is empty, say that you don't know the answer.
Information:
{context}
Question: {question}
Helpful Answer:"""
CYPHER_QA_PROMPT = PromptTemplate(
input_variables=["context", "question"], template=CYPHER_QA_TEMPLATE
)
SPARQL_INTENT_TEMPLATE = """Task: Identify the intent of a prompt and return the appropriate SPARQL query type.
You are an assistant that distinguishes different types of prompts and returns the corresponding SPARQL query types.
Consider only the following query types:
* SELECT: this query type corresponds to questions
* UPDATE: this query type corresponds to all requests for deleting, inserting, or changing triples
Note: Be as concise as possible.
Do not include any explanations or apologies in your responses.
Do not respond to any questions that ask for anything else than for you to identify a SPARQL query type.
Do not include any unnecessary whitespaces or any text except the query type, i.e., either return 'SELECT' or 'UPDATE'.
The prompt is:
{prompt}
Helpful Answer:"""
SPARQL_INTENT_PROMPT = PromptTemplate(
input_variables=["prompt"], template=SPARQL_INTENT_TEMPLATE
)
SPARQL_GENERATION_SELECT_TEMPLATE = """Task: Generate a SPARQL SELECT statement for querying a graph database.
For instance, to find all email addresses of John Doe, the following query in backticks would be suitable:
```
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?email
WHERE {{
?person foaf:name "John Doe" .
?person foaf:mbox ?email .
}}
```
Instructions:
Use only the node types and properties provided in the schema.
Do not use any node types and properties that are not explicitly provided.
Include all necessary prefixes.
Schema:
{schema}
Note: Be as concise as possible.
Do not include any explanations or apologies in your responses.
Do not respond to any questions that ask for anything else than for you to construct a SPARQL query.
Do not include any text except the SPARQL query generated.
The question is:
{prompt}"""
SPARQL_GENERATION_SELECT_PROMPT = PromptTemplate(
input_variables=["schema", "prompt"], template=SPARQL_GENERATION_SELECT_TEMPLATE
)
SPARQL_GENERATION_UPDATE_TEMPLATE = """Task: Generate a SPARQL UPDATE statement for updating a graph database.
For instance, to add 'PI:EMAIL:efpyi@example.comEND_PI' as a new email address for Jane Doe, the following query in backticks would be suitable:
```
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
INSERT {{
?person foaf:mbox <mailto:PI:EMAIL:efpyi@example.comEND_PI> .
}}
WHERE {{
?person foaf:name "Jane Doe" .
}}
```
Instructions:
Make the query as short as possible and avoid adding unnecessary triples.
Use only the node types and properties provided in the schema.
Do not use any node types and properties that are not explicitly provided.
Include all necessary prefixes.
Schema:
{schema}
Note: Be as concise as possible.
Do not include any explanations or apologies in your responses.
Do not respond to any questions that ask for anything else than for you to construct a SPARQL query.
Return only the generated SPARQL query, nothing else.
The information to be inserted is:
{prompt}"""
SPARQL_GENERATION_UPDATE_PROMPT = PromptTemplate(
input_variables=["schema", "prompt"], template=SPARQL_GENERATION_UPDATE_TEMPLATE
)
SPARQL_QA_TEMPLATE = """Task: Generate a natural language response from the results of a SPARQL query.
You are an assistant that creates well-written and human understandable answers.
The information part contains the information provided, which you can use to construct an answer.
The information provided is authoritative, you must never doubt it or try to use your internal knowledge to correct it.
Make your response sound like the information is coming from an AI assistant, but don't add any information.
Information:
{context}
Question: {prompt}
Helpful Answer:"""
SPARQL_QA_PROMPT = PromptTemplate(
input_variables=["context", "prompt"], template=SPARQL_QA_TEMPLATE
)
AQL_GENERATION_TEMPLATE = """Task: Generate an ArangoDB Query Language (AQL) query from a User Input.
You are an ArangoDB Query Language (AQL) expert responsible for translating a `User Input` into an ArangoDB Query Language (AQL) query.
You are given an `ArangoDB Schema`. It is a JSON Object containing:
1. `Graph Schema`: Lists all Graphs within the ArangoDB Database Instance, along with their Edge Relationships.
2. `Collection Schema`: Lists all Collections within the ArangoDB Database Instance, along with their document/edge properties and a document/edge example.
You may also be given a set of `AQL Query Examples` to help you create the `AQL Query`. If provided, the `AQL Query Examples` should be used as a reference, similar to how `ArangoDB Schema` should be used.
Things you should do:
- Think step by step.
- Rely on `ArangoDB Schema` and `AQL Query Examples` (if provided) to generate the query.
- Begin the `AQL Query` by the `WITH` AQL keyword to specify all of the ArangoDB Collections required.
- Return the `AQL Query` wrapped in 3 backticks (```).
- Use only the provided relationship types and properties in the `ArangoDB Schema` and any `AQL Query Examples` queries.
- Only answer to requests related to generating an AQL Query.
- If a request is unrelated to generating AQL Query, say that you cannot help the user.
Things you should not do:
- Do not use any properties/relationships that can't be inferred from the `ArangoDB Schema` or the `AQL Query Examples`.
- Do not include any text except the generated AQL Query.
- Do not provide explanations or apologies in your responses.
- Do not generate an AQL Query that removes or deletes any data.
Under no circumstance should you generate an AQL Query that deletes any data whatsoever.
ArangoDB Schema:
{adb_schema}
AQL Query Examples (Optional):
{aql_examples}
User Input:
{user_input}
AQL Query:
"""
AQL_GENERATION_PROMPT = PromptTemplate(
input_variables=["adb_schema", "aql_examples", "user_input"],
template=AQL_GENERATION_TEMPLATE,
)
AQL_FIX_TEMPLATE = """Task: Address the ArangoDB Query Language (AQL) error message of an ArangoDB Query Language query.
You are an ArangoDB Query Language (AQL) expert responsible for correcting the provided `AQL Query` based on the provided `AQL Error`.
The `AQL Error` explains why the `AQL Query` could not be executed in the database.
The `AQL Error` may also contain the position of the error relative to the total number of lines of the `AQL Query`.
For example, 'error X at position 2:5' denotes that the error X occurs on line 2, column 5 of the `AQL Query`.
You are also given the `ArangoDB Schema`. It is a JSON Object containing:
1. `Graph Schema`: Lists all Graphs within the ArangoDB Database Instance, along with their Edge Relationships.
2. `Collection Schema`: Lists all Collections within the ArangoDB Database Instance, along with their document/edge properties and a document/edge example.
You will output the `Corrected AQL Query` wrapped in 3 backticks (```). Do not include any text except the Corrected AQL Query.
Remember to think step by step.
ArangoDB Schema:
{adb_schema}
AQL Query:
{aql_query}
AQL Error:
{aql_error}
Corrected AQL Query:
"""
AQL_FIX_PROMPT = PromptTemplate(
input_variables=[
"adb_schema",
"aql_query",
"aql_error",
],
template=AQL_FIX_TEMPLATE,
)
AQL_QA_TEMPLATE = """Task: Generate a natural language `Summary` from the results of an ArangoDB Query Language query.
You are an ArangoDB Query Language (AQL) expert responsible for creating a well-written `Summary` from the `User Input` and associated `AQL Result`.
A user has executed an ArangoDB Query Language query, which has returned the AQL Result in JSON format.
You are responsible for creating an `Summary` based on the AQL Result.
You are given the following information:
- `ArangoDB Schema`: contains a schema representation of the user's ArangoDB Database.
- `User Input`: the original question/request of the user, which has been translated into an AQL Query.
- `AQL Query`: the AQL equivalent of the `User Input`, translated by another AI Model. Should you deem it to be incorrect, suggest a different AQL Query.
- `AQL Result`: the JSON output returned by executing the `AQL Query` within the ArangoDB Database.
Remember to think step by step.
Your `Summary` should sound like it is a response to the `User Input`.
Your `Summary` should not include any mention of the `AQL Query` or the `AQL Result`.
ArangoDB Schema:
{adb_schema}
User Input:
{user_input}
AQL Query:
{aql_query}
AQL Result:
{aql_result}
"""
AQL_QA_PROMPT = PromptTemplate(
input_variables=["adb_schema", "user_input", "aql_query", "aql_result"],
template=AQL_QA_TEMPLATE,
)
NEPTUNE_OPENCYPHER_EXTRA_INSTRUCTIONS = """
Instructions:
Generate the query in openCypher format and follow these rules:
Do not use `NONE`, `ALL` or `ANY` predicate functions, rather use list comprehensions.
Do not use `REDUCE` function. Rather use a combination of list comprehension and the `UNWIND` clause to achieve similar results.
Do not use `FOREACH` clause. Rather use a combination of `WITH` and `UNWIND` clauses to achieve similar results.
\n"""
NEPTUNE_OPENCYPHER_GENERATION_TEMPLATE = CYPHER_GENERATION_TEMPLATE.replace(
"Instructions:", NEPTUNE_OPENCYPHER_EXTRA_INSTRUCTIONS
)
NEPTUNE_OPENCYPHER_GENERATION_PROMPT = PromptTemplate(
input_variables=["schema", "question"],
template=NEPTUNE_OPENCYPHER_GENERATION_TEMPLATE,
)
NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_TEMPLATE = """
Write an openCypher query to answer the following question. Do not explain the answer. Only return the query.
Question: "{question}".
Here is the property graph schema:
{schema}
\n"""
NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_PROMPT = PromptTemplate(
input_variables=["schema", "question"],
template=NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_TEMPLATE,
)
|
hf_public_repos/JorisdeJong123/7-Days-of-LangChain | hf_public_repos/JorisdeJong123/7-Days-of-LangChain/day_4/scientific_newsletter.py | """
This script shows how to create a newsletter based on the latest Arxiv articles.
We're using an easy LangChain implementation to show how to use the different components of LangChain.
This is part of my '7 Days of LangChain' series.
Check out the explanation about the code on my Twitter (@JorisTechTalk)
"""
from langchain.document_loaders import ArxivLoader
from langchain.agents.agent_toolkits import GmailToolkit
from langchain import OpenAI
import os
from langchain.agents import initialize_agent, AgentType
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain import LLMChain
from langchain.callbacks import get_openai_callback
import arxiv
# Topic of the newsletter you want to write about
query = "LLM"
# Set up the ArxivLoader
search = arxiv.Search(
query = query,
max_results = 4,
sort_by = arxiv.SortCriterion.SubmittedDate
)
# Initialize the docs variable
docs = ""
# Add all relevant information to the docs variable
for result in search.results():
docs += "Title: " + result.title + "\n"
docs += "Abstract: " + result.summary + "\n"
docs += "Download URL: " + result.pdf_url + "\n"
print(result.links)
for link in result.links:
docs += "Links: " + link.href + "\n"
# Track cost
with get_openai_callback() as cb:
# Template for the newsletter
prompt_newsletter_template = """
You are a newsletter writer. You write newsletters about scientific articles. You introduce the article and show a small summary to tell the user what the article is about.
You're main goal is to write a newsletter which contains summaries to interest the user in the articles.
--------------------
{text}
--------------------
Start with the title of the article. Then, write a small summary of the article.
Below each summary, include the link to the article containing /abs/ in the URL.
Summaries:
"""
PROMPT_NEWSLETTER = PromptTemplate(template=prompt_newsletter_template, input_variables=["text"])
# Set the OpenAI API key
os.environ['OPENAI_API_KEY'] = 'YOUR_API_KEY_HERE'
# Initialize the language model
llm = ChatOpenAI(temperature=0.6, model_name="gpt-3.5-turbo-16k", verbose=True)
# Initialize the LLMChain
newsletter_chain = LLMChain(llm=llm, prompt=PROMPT_NEWSLETTER, verbose=True)
# Run the LLMChain
newsletter = newsletter_chain.run(docs)
# Write newsletter to a text file
with open("newsletter.txt", "w") as f:
f.write(newsletter)
# Set toolkit
toolkit = GmailToolkit()
# Initialize the Gmail agent
agent = initialize_agent(
tools=toolkit.get_tools(),
llm=llm,
agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
verbose=True
)
# Run the agent
instructions = f"""
Write a draft directed to jorisdejong456@gmail.com, NEVER SEND THE EMAIL.
The subject should be 'Scientific Newsletter about {query}'.
The content should be the following: {newsletter}.
"""
agent.run(instructions)
print(cb) | 0 | 102 | [{"tag": "EMAIL", "value": "jorisdejong456@gmail.com", "start": 2878, "end": 2902}] | true | 1 | """
This script shows how to create a newsletter based on the latest Arxiv articles.
We're using an easy LangChain implementation to show how to use the different components of LangChain.
This is part of my '7 Days of LangChain' series.
Check out the explanation about the code on my Twitter (@JorisTechTalk)
"""
from langchain.document_loaders import ArxivLoader
from langchain.agents.agent_toolkits import GmailToolkit
from langchain import OpenAI
import os
from langchain.agents import initialize_agent, AgentType
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain import LLMChain
from langchain.callbacks import get_openai_callback
import arxiv
# Topic of the newsletter you want to write about
query = "LLM"
# Set up the ArxivLoader
search = arxiv.Search(
query = query,
max_results = 4,
sort_by = arxiv.SortCriterion.SubmittedDate
)
# Initialize the docs variable
docs = ""
# Add all relevant information to the docs variable
for result in search.results():
docs += "Title: " + result.title + "\n"
docs += "Abstract: " + result.summary + "\n"
docs += "Download URL: " + result.pdf_url + "\n"
print(result.links)
for link in result.links:
docs += "Links: " + link.href + "\n"
# Track cost
with get_openai_callback() as cb:
# Template for the newsletter
prompt_newsletter_template = """
You are a newsletter writer. You write newsletters about scientific articles. You introduce the article and show a small summary to tell the user what the article is about.
You're main goal is to write a newsletter which contains summaries to interest the user in the articles.
--------------------
{text}
--------------------
Start with the title of the article. Then, write a small summary of the article.
Below each summary, include the link to the article containing /abs/ in the URL.
Summaries:
"""
PROMPT_NEWSLETTER = PromptTemplate(template=prompt_newsletter_template, input_variables=["text"])
# Set the OpenAI API key
os.environ['OPENAI_API_KEY'] = 'YOUR_API_KEY_HERE'
# Initialize the language model
llm = ChatOpenAI(temperature=0.6, model_name="gpt-3.5-turbo-16k", verbose=True)
# Initialize the LLMChain
newsletter_chain = LLMChain(llm=llm, prompt=PROMPT_NEWSLETTER, verbose=True)
# Run the LLMChain
newsletter = newsletter_chain.run(docs)
# Write newsletter to a text file
with open("newsletter.txt", "w") as f:
f.write(newsletter)
# Set toolkit
toolkit = GmailToolkit()
# Initialize the Gmail agent
agent = initialize_agent(
tools=toolkit.get_tools(),
llm=llm,
agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
verbose=True
)
# Run the agent
instructions = f"""
Write a draft directed to dycjh@example.com, NEVER SEND THE EMAIL.
The subject should be 'Scientific Newsletter about {query}'.
The content should be the following: {newsletter}.
"""
agent.run(instructions)
print(cb) | true | """
This script shows how to create a newsletter based on the latest Arxiv articles.
We're using an easy LangChain implementation to show how to use the different components of LangChain.
This is part of my '7 Days of LangChain' series.
Check out the explanation about the code on my Twitter (@JorisTechTalk)
"""
from langchain.document_loaders import ArxivLoader
from langchain.agents.agent_toolkits import GmailToolkit
from langchain import OpenAI
import os
from langchain.agents import initialize_agent, AgentType
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain import LLMChain
from langchain.callbacks import get_openai_callback
import arxiv
# Topic of the newsletter you want to write about
query = "LLM"
# Set up the ArxivLoader
search = arxiv.Search(
query = query,
max_results = 4,
sort_by = arxiv.SortCriterion.SubmittedDate
)
# Initialize the docs variable
docs = ""
# Add all relevant information to the docs variable
for result in search.results():
docs += "Title: " + result.title + "\n"
docs += "Abstract: " + result.summary + "\n"
docs += "Download URL: " + result.pdf_url + "\n"
print(result.links)
for link in result.links:
docs += "Links: " + link.href + "\n"
# Track cost
with get_openai_callback() as cb:
# Template for the newsletter
prompt_newsletter_template = """
You are a newsletter writer. You write newsletters about scientific articles. You introduce the article and show a small summary to tell the user what the article is about.
You're main goal is to write a newsletter which contains summaries to interest the user in the articles.
--------------------
{text}
--------------------
Start with the title of the article. Then, write a small summary of the article.
Below each summary, include the link to the article containing /abs/ in the URL.
Summaries:
"""
PROMPT_NEWSLETTER = PromptTemplate(template=prompt_newsletter_template, input_variables=["text"])
# Set the OpenAI API key
os.environ['OPENAI_API_KEY'] = 'YOUR_API_KEY_HERE'
# Initialize the language model
llm = ChatOpenAI(temperature=0.6, model_name="gpt-3.5-turbo-16k", verbose=True)
# Initialize the LLMChain
newsletter_chain = LLMChain(llm=llm, prompt=PROMPT_NEWSLETTER, verbose=True)
# Run the LLMChain
newsletter = newsletter_chain.run(docs)
# Write newsletter to a text file
with open("newsletter.txt", "w") as f:
f.write(newsletter)
# Set toolkit
toolkit = GmailToolkit()
# Initialize the Gmail agent
agent = initialize_agent(
tools=toolkit.get_tools(),
llm=llm,
agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
verbose=True
)
# Run the agent
instructions = f"""
Write a draft directed to PI:EMAIL:dycjh@example.comEND_PI, NEVER SEND THE EMAIL.
The subject should be 'Scientific Newsletter about {query}'.
The content should be the following: {newsletter}.
"""
agent.run(instructions)
print(cb) |