from io import BytesIO
from typing import List

from asgiref.sync import sync_to_async
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.db import transaction
from openai import OpenAI, AsyncOpenAI
from openai.types import FileObject

from llm.entity.dao.FilesDAO import FilesDAO
from llm.models import Files

from decouple import config

from users.models import Users

# OpenAI API key loaded from the environment / .env file via python-decouple.
# NOTE(review): default=False with cast=str — when the key is unset this
# likely yields a non-string/falsy placeholder rather than failing fast;
# verify decouple's cast-on-default behavior and consider raising instead.
OPENAI_API_KEY = config('OPENAI_API_KEY', default=False, cast=str)

# Module-level synchronous OpenAI client shared by all FileService methods.
client = OpenAI(
    api_key=OPENAI_API_KEY
)
from langchain_chroma import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_core.documents import Document
import asyncio
import json
from logging import exception


def _file_to_documents_save(file: InMemoryUploadedFile, vector_store: Chroma) -> list[str]:
    """
    Parse an uploaded JSON file containing a list of device-config objects and
    store each object as one Document in the given Chroma vector store.

    :param file: uploaded file whose content is a JSON array of objects
    :param vector_store: target Chroma collection to add the documents to
    :return: list of document ids added to the store, or [] when the file
        cannot be parsed as JSON (previously fell through returning None,
        which crashed callers doing ``id_list += ids``)
    """
    try:
        data = json.load(file)
    except (json.JSONDecodeError, UnicodeDecodeError, ValueError) as e:
        # BUG FIX: the original wrote ``except exception`` where ``exception``
        # is logging.exception (a function, not a BaseException subclass), so
        # any error here raised ``TypeError: catching classes that do not
        # inherit from BaseException`` instead of being handled.
        print(e)
        return []

    # One Document per device object; pretty-printed JSON keeps the payload
    # human-readable and ensure_ascii=False preserves non-ASCII characters.
    documents = [
        Document(page_content=json.dumps(device, indent=2, ensure_ascii=False))
        for device in data
    ]
    return vector_store.add_documents(documents)


class FileService:
    @classmethod
    def create(cls, file_objects: List[InMemoryUploadedFile]) -> List[FileObject]:
        """
        Upload user-provided files to the OpenAI platform and return the
        corresponding FileObject list.

        :param file_objects: in-memory uploaded files from the request
        :return: list of OpenAI FileObject records, one per input file
        :raises Exception: propagated from the OpenAI SDK / DAO; files uploaded
            before the failure are NOT rolled back
        """
        # SECURITY FIX: do not print OPENAI_API_KEY — secrets must never be logged.
        file_list: List[FileObject] = []
        for file_object in file_objects:
            file_content = file_object.read()
            # BUG FIX: BytesIO.__sizeof__() reports interpreter object overhead,
            # not the payload size; len() of the raw bytes is the real file size.
            file_size = len(file_content)
            file = client.files.create(
                file=(file_object.name, BytesIO(file_content)),
                purpose="assistants",
            )
            file_list.append(file)
            # TODO: async context / DB transaction management still needs work.
            FilesDAO.add(file_id=file.id, file_name=file_object.name, file_size=file_size)
        return file_list

    @classmethod
    def upload_files_to_chroma(cls, files: List[InMemoryUploadedFile], user: Users) -> List[str]:
        """
        Upload files (e.g. oneNetDevices.json) that each contain multiple
        device-config JSON objects; supports batch upload.

        Each file is split into one document per device object and saved into a
        local "cache area" Chroma store; the returned id list can be combined
        with other endpoints for different upload scenarios. When the user later
        confirms a configuration, the documents matching the given file ids are
        moved into the user's dedicated Chroma collection.

        :param files: uploaded JSON files to parse and store
        :param user: owner; user_id is recorded in the collection metadata
        :return: ids of all documents added to the cache-area collection
        """
        # TODO(review): hard-coded absolute Windows path — move to settings/env.
        persist_directory = r"D:\DevelopFiles\pycharms\HomeAutoAI\llm\ChromaDB\CacheArea"
        embeddings = OpenAIEmbeddings()
        vector_store = Chroma(
            collection_name="cache_area",
            embedding_function=embeddings,
            persist_directory=persist_directory,
            collection_metadata={
                "user_id": user.user_id
            }
        )
        # BUG FIX: the original ``finally: return id_list`` swallowed every
        # exception (making ``raise e`` dead code); let errors propagate.
        id_list: list[str] = []
        for file in files:
            ids = _file_to_documents_save(file, vector_store)
            # The helper may yield a falsy result for an unparsable file;
            # skip it instead of crashing on ``id_list += None``.
            if ids:
                id_list += ids
        return id_list

    @classmethod
    def upload_device_config_to_chroma(cls, device_configs: list[dict]) -> List[str]:
        """
        Store individual device-config dicts in the agent's Chroma collection.

        :param device_configs: device configuration dicts, one document each
        :return: ids of the documents added to the vector store
        """
        # TODO(review): hard-coded absolute Windows path — move to settings/env.
        persist_directory = r"D:\DevelopFiles\pycharms\HomeAutoAI\llm\ChromaDB\DeviceConfigs"
        embeddings = OpenAIEmbeddings()
        vector_store = Chroma(
            collection_name="vector_collection_for_agent",
            embedding_function=embeddings,
            persist_directory=persist_directory
        )

        # One Document per config dict, serialized as pretty-printed JSON with
        # non-ASCII characters preserved (ensure_ascii=False).
        documents = [
            Document(page_content=json.dumps(device, indent=2, ensure_ascii=False))
            for device in device_configs
        ]
        return vector_store.add_documents(documents)
