from langchain_community.document_loaders import DirectoryLoader
from langchain_openai import AzureOpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.text_splitter import CharacterTextSplitter
import os

# ==== Azure Embedding API configuration ====
# SECURITY: never hardcode credentials in source. A real API key was previously
# committed on this line — it must be considered compromised and rotated in the
# Azure portal. The key is now read from the environment instead.
if not os.environ.get("AZURE_OPENAI_API_KEY"):
    raise SystemExit(
        "AZURE_OPENAI_API_KEY is not set. Export it before running this script."
    )
# Non-secret connection settings: keep the previous values as defaults, but let
# the environment override them.
os.environ.setdefault("AZURE_OPENAI_ENDPOINT", "https://eastus-0303.openai.azure.com/")
os.environ.setdefault("AZURE_OPENAI_API_VERSION", "2023-05-15")  # use the version from your endpoint

# ==== Embedding model info ====
embedding_deployment = "text-embedding-3-large-1"  # deployment name
embedding_model = "text-embedding-3-large"         # model type

# 1. Load local documents (files under company_data/ matching the glob pattern)
loader = DirectoryLoader("company_data", glob="xinxi/ziliao.txt")
documents = loader.load()
if not documents:
    # Fail fast instead of silently building an empty vector store.
    raise SystemExit("No documents found under company_data/xinxi/ziliao.txt")

# 2. Split documents into overlapping chunks for embedding
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
docs = text_splitter.split_documents(documents)

# 3. Create the Azure OpenAI embeddings client
embeddings = AzureOpenAIEmbeddings(
    deployment=embedding_deployment,
    model=embedding_model,
    api_key=os.environ["AZURE_OPENAI_API_KEY"],
    azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
    api_version=os.environ["AZURE_OPENAI_API_VERSION"],
)

# 4. Embed the chunks and persist them to a Chroma database on disk.
# NOTE(review): recent chromadb versions persist automatically when
# persist_directory is given; older langchain versions required db.persist() —
# verify against the installed versions.
db = Chroma.from_documents(docs, embeddings, persist_directory="company_db")
print("✅ 向量数据库已创建并保存到 company_db")

