from langchain_community.document_loaders import TextLoader, CSVLoader, BSHTMLLoader, JSONLoader
from langchain_community.llms.tongyi import Tongyi
from langchain_community.vectorstores import Chroma
from langchain_chroma import Chroma
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_text_splitters import CharacterTextSplitter

#
# loader = TextLoader('new.md')
#
# res = loader.load()
# print(res)

# loader = BSHTMLLoader('loader.html')
# data = loader.load()
# print(data)
# for i in data:
#     print(i.page_content.split())

# loader = CSVLoader('loader.csv')
# ret = loader.load()
# print(ret)
# for i in ret:
#     print('----------------')
#     print(i.page_content.split())

# loader = JSONLoader(
#     file_path="simple_prompt.json",  # JSON 文件的路径
#     jq_schema=".template",  # 要提取的字段
#     text_content=True  # 是否加载文本内容
# )
# # 加载数据
# data = loader.load()
# # 打印加载的数据
# print(data)

# loader = TextLoader('new.txt',encoding='utf-8')
# ret = loader.load()
# print(ret)
#
# from langchain.text_splitter import CharacterTextSplitter
#
# text_spliter = CharacterTextSplitter(separator="\n", chunk_size=10, chunk_overlap=0)# 分割文本 10 个字符 跳过重叠
#
# res = text_spliter.split_documents(ret)
# print('-------------')
# print(res)
#
#
#
#
# from langchain.text_splitter import CharacterTextSplitter
#
# text_spliter = CharacterTextSplitter.from_tiktoken_encoder(
#     chunk_size=400,
#     chunk_overlap=0
#     )
#
# loader = TextLoader('new.txt')
# print('-------------')
# res = loader.load_and_split(text_spliter)
# print(res)

# doc = TextLoader('new.txt').load()
#
# spliter = CharacterTextSplitter(separator='\n',chunk_size=50,chunk_overlap=0)
#
# chunks =spliter.split_documents(doc)
#
# embeddings = DashScopeEmbeddings()
#
# db = Chroma.from_documents(chunks,embeddings,persist_directory='./chroma')
#
# db1 = Chroma(persist_directory="./chroma",embedding_function=embeddings)
# print(doc)
# qw = input("请输入消息：")
# res = db.similarity_search(qw,k=2)
# print(res)
#
# prompt = ""
#
# for i in res:
#     prompt += i.page_content + '\n'
#
# prompt += "切割锯以上内容回答：" +f"{qw}"
# llm = Tongyi()
# res = llm.invoke(prompt)
# print("回复--------------------")
# print(res)

#
# import os
# from dashscope import Generation
# def get_message(mes):
#     messages = [
#         {'role': 'system', 'content': 'You are a helpful assistant.'},
#         {'role': 'user', 'content': mes}
#     ]
#
#     response = Generation.call(
#         # 若没有配置环境变量，请用百炼API Key将下行替换为：api_key = "sk-xxx",
#         api_key=os.getenv("DASHSCOPE_API_KEY"),
#         model="qwen-plus",
#         messages=messages,
#         result_format="message"
#     )
#
#     if response.status_code == 200:
#         return response.output.choices[0].message.content
#     else:
#         return f"错误码：{response.code}" + f'错误信息：{response.message}'


import os
from dashscope import Generation

# One-shot call to the Qwen "qwen-plus" chat model through DashScope,
# printing either the assistant's reply or diagnostic error details.
messages = [
    {'role': 'system', 'content': 'You are a helpful assistant.'},
    {'role': 'user', 'content': '你是谁？'},
]

# If the DASHSCOPE_API_KEY environment variable is not configured, replace
# the api_key argument below with your Model Studio key: api_key = "sk-xxx"
response = Generation.call(
    api_key=os.getenv("DASHSCOPE_API_KEY"),
    model="qwen-plus",
    messages=messages,
    result_format="message",
)

if response.status_code != 200:
    # Failure path: surface HTTP status, service error code/message, and docs link.
    for line in (
        f"HTTP返回码：{response.status_code}",
        f"错误码：{response.code}",
        f"错误信息：{response.message}",
        "请参考文档：https://help.aliyun.com/zh/model-studio/developer-reference/error-code",
    ):
        print(line)
else:
    # Success path: the reply text lives in the first choice of the output.
    print(response.output.choices[0].message.content)
