# from django.shortcuts import render
#
# # Create your views here.
# from langchain_community.embeddings import DashScopeEmbeddings
# from langchain_community.vectorstores import FAISS
# from langchain_core.example_selectors import SemanticSimilarityExampleSelector
# from langchain_core.prompts import FewShotPromptTemplate
# from rest_framework.views import APIView
# from rest_framework.response import Response
# from langchain.prompts import PromptTemplate
#
# class MyApiView(APIView):
#     """
#     自定义API视图类，处理与关键词相关的查询请求
#     """
#     def get(self, request):
#         """
#         处理GET请求，接收关键词并返回相关示例ID
#         """
#         # 从请求中获取关键词参数
#         words = request.query_params.get('words')
#         # 如果关键词为空，则返回错误响应
#         if words is None:
#             return Response({'msg': '关键词不能为空'})
#
#         # 定义一组示例数据，每个示例包含一个ID和一组特征
#         examples = [
#             {"id": "1", "features": "心脏病症状：心慌、气短、心跳加速"},
#             {"id": "2", "features": "感冒症状：流鼻涕、头晕、出冷汗"},
#             {"id": "3", "features": "糖尿病症状：血糖高、体重下降、心慌"},
#             {"id": "4", "features": "高血压症状：头痛、头晕、视力模糊"},
#         ]
#
#         # 定义一个模板，用于生成示例,其中id和features是输入变量，template中用{}表示输入变量
#         examples_prompt = PromptTemplate(
#             input_variables = ["id","features"],
#             template = "{id},"
#         )
#
#         # 使用SemanticSimilarityExampleSelector根据示例数据、嵌入模型和FAISS索引选择与关键词最相关的示例
#         examples_selector = SemanticSimilarityExampleSelector.from_examples(
#             examples,
#             DashScopeEmbeddings(), # 使用DashScopeEmbeddings作为嵌入模型
#             FAISS,
#             k = 2
#         )
#
#         # 定义一个FewShotPromptTemplate，用于根据选择的示例和模板生成最终的提示
#         few_show_prompt = FewShotPromptTemplate(
#             example_selector = examples_selector,
#             example_prompt=examples_prompt,
#             prefix="",
#             suffix="",
#             input_variables=["words"]
#         )
#
#         # 根据关键词格式化最终的提示
#         res = few_show_prompt.format(words=words)
#         print(res)
#
#         # 将生成的提示按逗号分割成列表
#         items = res.split(',')
#         print('-------------------------------------------')
#         print(items)
#
#         # 将列表中的每个元素转换为整数，并过滤掉空字符串
#         id_list = [int(i.strip()) for i in items if i.strip()]
#
#         # 定义一个不需要的ID列表
#         v_list = [1]
#
#         # 过滤掉不需要的ID，得到最终的ID列表
#         new_id_list = [i for i in id_list if i  not in v_list]
#         print('--------------------------相似')
#         print(new_id_list)
#
#         # 返回成功响应，包含最终的ID列表
#         return Response({"message":"语义相近数据查询成功","data":new_id_list})
#
from langchain.embeddings import CacheBackedEmbeddings
from langchain.storage import LocalFileStore
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_community.vectorstores import FAISS
from rest_framework.response import Response
from rest_framework.views import APIView
# NOTE(review): `static` from sympy.codegen.cnodes is never used anywhere in this
# file and looks like an accidental IDE auto-import — confirm and remove it; it
# pulls in the whole sympy package at module import time for nothing.
from sympy.codegen.cnodes import static



# from langchain.prompts import PromptTemplate,FewShotPromptTemplate,LengthBasedExampleSelector
# from langchain_community.llms.tongyi import Tongyi
#
# # 创建实例数据
# examples = [
#     {"input": "患者如何在家测量血压？", "output": "患者可以在家使用电子血压计测量血压，遵循说明书上的步骤，通常在早上和晚上各测量一次。"},
#     {"input": "糖尿病患者的饮食应该注意什么？", "output": "糖尿病患者应该注意饮食中的糖分和碳水化合物摄入，多食用蔬菜、全谷物和瘦肉，避免高糖和高脂肪食物。"},
#     {"input": "儿童发烧时应该如何处理？", "output": "儿童发烧时，应首先测量体温，如果超过38.5°C，可以使用退烧药，并给孩子多喝水，保持通风，适当减少衣物。"}
# ]
#
# #
# example_prompt = PromptTemplate(
#     input_variables=["input", "output"], # 输入变量
#     template="请问您说的是什么？\n{input}\n回答：\n{output}\n"# 模板
# )
# #
# example_selector = LengthBasedExampleSelector(
#     examples = examples,
#     example_prompt = example_prompt,
#     max_length = 4
#
# )
# dynamic_prompt = FewShotPromptTemplate(
#     example_prompt=example_prompt,
#     example_selector=example_selector,
#     prefix = '根据描述，确定病情。',
#     suffix = "输入：{adjective}\n输出：",
#     input_variables = ['adjective']
# )
#
# prompt = dynamic_prompt.format(adjective="糖尿病患者的饮食")
#
# llm = Tongyi()
# ret = llm.invoke(prompt)
# print('正在加载中·········')
# print(ret)
# from langchain.prompts import PromptTemplate
# from langchain_community.llms.tongyi import Tongyi
#
# retrieval_prompt = PromptTemplate(template="请从医疗数据库中检索与用户查询最相关的{k}个文件块。",input_variables = ["query","k"])
#
#
# qa_prompt = PromptTemplate(template= "基于以下医疗知识，回答用户的问题：{context}\n问题：{query}",input_variables = ["context","query"])
#
# pipeline_prompt =retrieval_prompt + "\n" +qa_prompt
# print(pipeline_prompt)
#
# prompt = pipeline_prompt.format(query = "头疼",context="头疼,胃痛",k=1)
#
# print(prompt)
#
# llm = Tongyi()
# ret = llm.invoke(prompt)
# print(ret)


# from langchain_core.prompts import load_prompt
#
# prompt = load_prompt('prompt.json',encoding='utf-8')
# pp = prompt.format(name = '苹果')
# print(pp)
#
# #
# # # 修改前代码
# from langchain.prompts import PipelinePromptTemplate
# from langchain.prompts import PromptTemplate
# from langchain_community.llms import Tongyi
#
# # 构建多步提示词模板
#
# full_template = "{name}{skill}{limit}"
# full_template = PromptTemplate.from_template(full_template)
#
# # 第一层角色设计
# name = """#角色 你是一个擅长{name}的游戏玩家"""
# name_prompt = PromptTemplate.from_template(name)
# # 第二层技能设计
# skill = """##技能 {skill}"""
# skill_prompt = PromptTemplate.from_template(skill)
# # 第三层限制条件
# limit = """###限制 {limit}"""
# limit_prompt = PromptTemplate.from_template(limit)
#
# input_prompts = [
#     ('name', name_prompt),
#     ('skill', skill_prompt),
#     ('limit', limit_prompt)
# ]
#
# pipeline_prompt = PipelinePromptTemplate(final_prompt=full_template,pipeline_prompts=input_prompts)
#
# ret1 = pipeline_prompt.format(name="剪刀石头布",skill="""### 技能：进行剪刀石头布游戏
# 1、当对方说出一个剪刀、石头或者布的时候，你需要判断是不是一个剪刀、石头或者布，如果不是，提示用户。
# 2、你可以接龙剪刀石头布。
# 3、你也可以出剪刀或者布，不能其它提示词。
# 4、每局你先开始""",limit="只能进行剪刀石头布游戏，拒绝回答其它游戏")
#
# ret2 = pipeline_prompt.format(name="成语接龙",skill="""### 技能：进行成语接龙游戏
# 1、当对方说出一个成语后，你需要判断是不是一个成语，如果不是，提示用户，要求用户重新输入一个新的成语。
# 2、可以使用同音字接龙。
# 3、你说的必须是成语，不能一般词语
# 4、如果你找不到下一个新的成语，重新开始接龙。""",limit="只能进行成语接龙，拒绝回答其它问题")
#
# pp = "游戏规则为{ret},用户输入{input}"
#
# prompt_template = PromptTemplate.from_template(pp)
#
# prompt = prompt_template.format(ret=ret1, input="剪刀石头布")
#
# print(prompt)
#
# tongyi = Tongyi()
# res = tongyi.invoke(prompt)
# print(res)

#
#
#
# # 稍加修改后的代码，仅供娱乐
# from langchain.prompts.pipeline import PipelinePromptTemplate
# from langchain.prompts import PromptTemplate
# from langchain_community.llms import Tongyi
#
# # 定义最终的提示模板，它将由一系列变量构成
# full_template = "{name}{skill}{limit}"
# full_prompt = PromptTemplate.from_template(full_template)
#
# # 定义各个部分的模板
# name = """# 角色
# 你是一个擅长{name}的游戏玩家。"""
# name_prompt = PromptTemplate.from_template(name)
#
# skill = """## 技能
# {skill}"""
# skill_prompt = PromptTemplate.from_template(skill)
#
# limit = """## 限制
# {limit}"""
# limit_prompt = PromptTemplate.from_template(limit)
#
# # 将各个部分的模板组合成一个处理流程
# input_prompts = [
#     ("name", name_prompt),
#     ("skill", skill_prompt),
#     ("limit", limit_prompt)
# ]
# pipeline_prompt = PipelinePromptTemplate(final_prompt=full_prompt, pipeline_prompts=input_prompts)
#
# # 初始化 Tongyi 大模型
# tongyi = Tongyi()
#
# def play_game():
#     # 使用 pipeline_prompt 生成具体的提示
#     ret = pipeline_prompt.format(
#         name="成语接龙",
#         skill="""### 技能1:进行成语接龙游戏
# 1.当对方说出一个成语后，你首先判断它是不是一个成语。如果不是则提示用户，要求用户重新输入;如果是成语，则需要根据该成语的最后一个字，说出一个新的成语。
# 2.可以使用同音字接龙。
# 3.你说的必须是“成语”，不能是一般的词语
# 4.当用户或你无法找到下一个成语时，此游戏结束。用户可以输入一个新的成语，重新开始接龙""",
#         limit="只进行成语接龙游戏，拒绝回答其他话题"
#     )
#
#     # 定义一个模板，用于生成与 Tongyi 模型交互的提示
#     pp = "游戏规则为{ret},用户输入{input}"
#     promptTemplate = PromptTemplate.from_template(pp)
#
#     while True:
#         # 获取用户输入
#         user_input = input("请输入你的成语（输入'退出'结束游戏）：")
#         if user_input.lower() == '退出':
#             print("游戏结束，感谢您的参与！")
#             break
#
#         # 生成 prompt
#         prompt = promptTemplate.format(ret=ret, input=user_input)
#         print(f"生成的提示：{prompt}")
#
#         # 调用 Tongyi 模型处理生成的 prompt
#         response = tongyi.invoke(prompt)
#         print(f"AI 的回应：{response}\n")
#
# # 开始游戏
# play_game()
#
# # from langchain_community.llms import Tongyi
# #
# # llm = Tongyi()
# #
# #
# # # 实现多轮输出文字，流式
# # while True:
# #     message = input("请输入您的问题：")
# #     ret = llm.invoke(message)
# #     print(ret)
# #     bye = ['退出','再见','拜拜','下次再见','下次见','下次聊','下次再聊']
# #     if message in bye:
# #         break
# #     else:
# #         continue
#
#
# # # 导入prompts模块的PromptTemplate类
# # from langchain.prompts import PromptTemplate
# # # 导入通义千问大模型
# # from langchain_community.llms import Tongyi
# #
# # # 自定义一个模板文件
# # template = "{country}的首都是什么？"
# # # 创建一个用户输入
# # country = input('请输入国家名称：')
# # # 创建一个PromptTemplate对象
# # prompt = PromptTemplate.from_template(template)
# # # 使用PromptTemplate对象生成一个提示
# # prompt_template = prompt.format(country=country)
# # # 使用通义千问大模型进行推理
# # llm = Tongyi()
# # ret = llm.invoke(prompt_template)
# # print(ret)
# # from langchain.prompts import PromptTemplate
# # from langchain_community.llms import Tongyi
# #
# # # 自定义一个模板文件
# # template = "对文章{msg}进行总结，最多给30字。"
# #
# # promptTemplate = PromptTemplate.from_template(template)
# #
# # UserInput = input("请输入文章：")
# # prompt = promptTemplate.format_prompt(msg=UserInput)
# # llm = Tongyi()
# # ret = llm.invoke(prompt)
# # print(ret)
# # from langchain.prompts import PromptTemplate
# # from langchain_community.llms import Tongyi
# #
# # template = ("对商品进行评价{mess}，以下是各个评价等级："
# #             "1.好评，2.中评，3.差评。")
# # promptTemplate = PromptTemplate.from_template(template)
# # UserInput = input("请输入评价：")
# # prompt = promptTemplate.format_prompt(mess=UserInput)
# # llm = Tongyi()
# # ret = llm.invoke(prompt)
# # # print(ret)
# # if "好评" in ret:
# #     print("好评")
# # elif "中评" in ret:
# #     print("中评")
# # else:
# #     print("差评")
#
# # from langchain.prompts import AIMessagePromptTemplate, HumanMessagePromptTemplate
# # from langchain_community.llms import Tongyi
# # from langchain_core.prompts import ChatPromptTemplate
# #
# # # 人类模板
# # human_template = "你喜欢{stuff}，所以写一篇50字的小作文。"
# # human_prompt = HumanMessagePromptTemplate.from_template(human_template)
# # # 机器模板
# # ai_template = "你是一个作品生成专家，我会根据用户输入，生成合格的内容."
# # ai_prompt = AIMessagePromptTemplate.from_template(ai_template)
# #
# # chat_prompt = ChatPromptTemplate.from_messages([human_prompt, ai_prompt])
# #
# # user_input = input("请输入#：")
# #
# # formatted_prompt = chat_prompt.format_prompt(stuff=user_input).to_messages()
# #
# # llm = Tongyi()
# # ret = llm.invoke(formatted_prompt)
# # print(ret)
# #
# #
# # from langchain.prompts import ChatMessagePromptTemplate
# # from langchain_community.llms import Tongyi
# # message = '请帮我写一篇关于{mes}的文章，字数在50字'
# # promptTemplate = ChatMessagePromptTemplate.from_template(role='全科医生',template=message)
# # prompt = promptTemplate.format(mes='感冒')
# # llm = Tongyi()
# # ret = llm.invoke(prompt.content)
# # 调用语言模型的invoke方法，传入格式化后的提示内容，生成文章内容，并将结果存储在变量ret中。
# # print(ret)
#
# # prompt = "假设你是一个非常擅长编程的AI，现在给你如下函数，你会按照如下格式，输出这行代码的名称、源代码、中文注释\n函数名称:{function_name}""\n返回类型:{function_type}\n源代码:\n{source_code}\n代码注释:"
#
# # def hello(e):
# #     print('hello' + e)
# #
# # import inspect
# # class ApiPrompt(StringPromptTemplate):
# #     def format(self, **kwargs: Any):
# #         code = inspect.getsource(kwargs['function_name'])
# #         pp = prompt.format(function_name=kwargs['function_name'].__name__,function_type=kwargs['function_type'],source_code=code)
# #         return pp
# #
# # prom = ApiPrompt(input_variables=['function_name', 'function_type'])
# #
# # pro = prom.format(function_name=hello,function_type='str')
# # print(pro)
# # print('--------------------------')
# # llm = Tongyi()
# # ret = llm.invoke(pro)
# # print(ret)
# #
#
#
# # pp = '{{country}}的首都是哪里？？？'
# # prompt_tem = PromptTemplate.from_template(pp,template_format='jinja2')
# #
# # prompt = prompt_tem.format(country='中国')
# # llm = Tongyi()
# # ret = llm.invoke(prompt)
# # print(ret)


# from typing import Dict,List
#
# from langchain.prompts.example_selector.base import BaseExampleSelector
# from langchain_community.llms import Tongyi
#
#
# class MedicalExampleSelector(BaseExampleSelector):
#     """
#     医疗示例选择器类，用于根据输入的疾病名称选择相关示例
#     """
#     def __init__(self, example: list[Dict[str, str]]):
#         """
#         初始化示例选择器，接收示例列表
#         """
#         self.examples = example
#
#     def add_example(self, example: Dict[str, str]) -> None:
#         """
#         添加新的示例到选择器中
#         """
#         self.examples.append(example)
#
#     def select_examples(self, input_variables: Dict[str, str]) -> List[Dict[str, str]]:
#         """
#         根据输入的疾病名称选择相关示例
#         """
#         disease_name = input_variables.get('disease_name', None)
#         if disease_name is None:
#             return []
#         selected_example = [
#             example for example in self.examples
#             if disease_name.lower() in example['input'].lower()
#         ]
#         return selected_example
#
#
# examples = [
#     {"input": "流感症状", "output": "发烧、咳嗽、喉咙痛、身体酸痛。"},
#     {"input": "糖尿病治疗", "output": "饮食控制、运动、药物"},
#     {"input": "新冠肺炎症状", "output": "发烧、咳嗽、疲劳、味觉或嗅觉丧失。"},
#     {"input": "糖尿病症状", "output": "口渴、尿频、体重减轻。"},
# ]
#
# medical_example_selector = MedicalExampleSelector(examples)
#
# selected_examples = medical_example_selector.select_examples({
#     "disease_name": "糖尿病"
# })
# print(selected_examples)
#
# prompt = "\n".join(f"{item['input']}:{item['output']}" for item in selected_examples)
#
# print(prompt)
#
# llm = Tongyi()
# ret = llm.invoke(prompt)
# print(ret)

#
# from sklearn.feature_extraction.text import TfidfVectorizer
# from sklearn.metrics.pairwise import cosine_similarity
# from typing import List, Dict
# import numpy as np
#
# # 假设的商品数据，每个商品由多个标签（特征）组成
# products = [
#     {"id": 1, "features": ["时尚", "运动鞋", "跑步"]},
#     {"id": 2, "features": ["休闲", "运动鞋", "篮球"]},
#     {"id": 3, "features": ["商务", "皮鞋", "正装"]},
#     {"id": 4, "features": ["户外", "徒步鞋", "探险"]},
#     {"id": 5, "features": ["时尚", "板鞋", "滑板"]},
# ]
#
#
# # 将商品特征转换为文本，用于TF-IDF向量化
# def product_to_text(product):
#     """
#     将商品特征转换为文本
#     """
#     return " ".join(product["features"])
#
#
# # 文本向量化
# vectorizer = TfidfVectorizer()
# product_texts = [product_to_text(p) for p in products]
# product_vectors = vectorizer.fit_transform(product_texts)
#
#
# # 余弦相似度计算函数
# def recommend_products(query_features: List[str], k: int = 2) -> List[Dict]:
#     """
#     根据查询特征推荐商品
#     """
#     query_text = " ".join(query_features)
#     query_vector = vectorizer.transform([query_text])
#
#     # 计算查询与所有商品的余弦相似度
#     similarities = cosine_similarity(query_vector, product_vectors).ravel()
#
#     # 获取最相似的k个商品的索引
#     top_indices = np.argsort(-similarities)[:k]
#
#     # 返回推荐的商品列表
#     return [products[idx] for idx in top_indices]
#
#
# # 示例查询
# query_features = ["时尚", "休闲"]
# recommended_products = recommend_products(query_features)
# print(recommended_products)
#
# #
# class Dbutils:
#     def __init__(self):
#         self.store = LocalFileStore('./cache/')
#         self.embeddings = DashScopeEmbeddings()
#         self.cached_embeddings = CacheBackedEmbeddings.from_bytes_store(self.embeddings,self.store,namespace=self.embeddings.model)
#
#     def add(self,chunks,key):
#         db = FAISS.from_documents(chunks,self.cached_embeddings)
#         db.save_local(key)
#
#
#     def search(self,query,key,count=3):
#         db = FAISS.load_local(key,self.cached_embeddings,allow_dangerous_deserialization=True)
#         res = db.similarity_search(query,k=count)
#         return res
#
#
# import os.path
#
# from rest_framework.views import APIView
# from rest_framework.response import Response
# import json
# import requests
# from bs4 import BeautifulSoup
#
#
# from langchain_community.document_loaders import TextLoader
# from langchain_text_splitters import CharacterTextSplitter
#
# # 爬取内容
# # class TestViews(APIView):
# #     def get(self, request):
# #         # 获取网页数据，拿到数据，写入本地文件
# #         # 1. 获取网页数据
# #         req_res = requests.get("https://movie.douban.com/",headers={
# #             "user-agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36"
# #         })
# #         # print("打印网络数据:",req_res.text)
# #         # 获取请求网页数据html
# #         req_html = req_res.text
# #
# #         movies = []
# #         soup = BeautifulSoup(req_html, "html.parser")
# #
# #         for review in soup.find_all("div", class_="review"):
# #             # print('--------------------')
# #             # print(review)
# #             # 获取地址标签
# #             movie_link_tag = review.find('div',class_="review-hd").find('a')
# #             # 获取电影标题标签
# #             movie_title_tag = review.find('div',class_="review-meta").find_all('a')[-1]
# #
# #             movie_url = movie_link_tag['href']
# #             movie_title = movie_title_tag.text.strip().replace("《","").replace("》","")
# #
# #             movies.append({"电影名称":movie_title,"电影地址":movie_url})
# #
# #         # 2. 写入本地文件
# #         print(movies)
# #         # 将电影数据以json格式进行保存
# #         file_name = './static/upload/movie.json'
# #         print('-------')
# #         print(os.path.join(file_name,'movie.json'))
# #         with open(os.path.join(file_name),'w',encoding='utf-8') as file:
# #             json.dump(movies,file,ensure_ascii=False,indent=4)
# #
# #         # 加载文件，写入向量数据库
# #         doc = TextLoader(file_name,encoding='utf-8').load()
# #         chunks = CharacterTextSplitter('\n',chunk_size=100,chunk_overlap=0).split_documents(doc)
# #
# #         # 存入向量数据库
# #         db = Dbutils()
# #         db.add(chunks,'moviewslist')
# #
# #         # db.search('请把电影给展示一下')
# #
# #
# #         return Response({"message": "数据添加完成"})
# # 上传文件
# class TestViews(APIView):
#     def post(self, request):
#         print("------------------")
#         print("打印xxxx:")
#         print(request.data)
#         print('------------------')
#         print(request.data.get('name'))
#         print("---------------")
#         print(request.FILES)
#         file = request.FILES.get('file')
#         print(file.name)
#         with open(f'./static/upload/{file.name}','wb') as f:
#             for chunk in file.chunks():
#                 f.write(chunk)
#         return Response({"message": "数据添加完成"})