from django.shortcuts import render
from ragflow_sdk import RAGFlow
import re
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
import requests
from bs4 import BeautifulSoup
import os
import redis


# Create your views here.


class DatasetClient:
    """Thin HTTP client for the RAGFlow datasets REST endpoint.

    Connection settings are read from the environment when available
    (RAGFLOW_DATASETS_URL / RAGFLOW_API_KEY) and fall back to the
    historical hard-coded defaults so existing deployments keep working.
    """

    # Seconds to wait for the RAGFlow server before giving up; without a
    # timeout a hung upstream would block the Django worker indefinitely.
    REQUEST_TIMEOUT = 10

    def __init__(self):
        self.base_url = os.environ.get(
            "RAGFLOW_DATASETS_URL",
            "http://10.123.66.2:9380/api/v1/datasets",
        )
        # SECURITY NOTE(review): this API key should live only in env/config
        # management, never in source control; the literal is kept solely as
        # a backward-compatible fallback.
        api_key = os.environ.get(
            "RAGFLOW_API_KEY",
            "ragflow-g5MmNiNDQ2OTgzZTExZWZhYmY0NzUzMT",
        )
        self.headers = {
            'Authorization': f"Bearer {api_key}"
        }

    def fetch_datasets(self):
        """GET the dataset list.

        Returns:
            The parsed JSON payload on HTTP 200, otherwise None (errors are
            printed; callers treat None as "no data", so this is best-effort
            by design).
        """
        try:
            response = requests.get(
                self.base_url,
                headers=self.headers,
                timeout=self.REQUEST_TIMEOUT,  # fix: original call could hang forever
            )
            if response.status_code == 200:
                return response.json()
            print(f"Error: Received status code {response.status_code}")
            return None
        except Exception as e:
            print(f"An error occurred: {e}")
            return None


class DatasetInfo(DatasetClient):
    """Client that projects the raw dataset payload into (id, name) pairs."""

    def get_dataset_info(self):
        """Return a list of {'id': ..., 'name': ...} dicts for every dataset.

        Returns [] (after printing a diagnostic) when the fetch fails or the
        payload's response code is non-zero.
        """
        payload = self.fetch_datasets()
        # Guard clause: anything other than a code-0 payload is a failure.
        if not payload or payload.get('code') != 0:
            print("Failed to fetch datasets or invalid response.")
            return []
        return [
            {'id': item['id'], 'name': item['name']}
            for item in payload.get('data', [])
        ]


class GetDatasetDocumentsView(APIView):
    """List all datasets plus the documents of one selected dataset.

    Query params:
        id: dataset id whose documents to list (defaults to a known dataset).

    Response shape:
        {'data': {'dataset_ids': [...], 'datasets_names': [...],
                  'document_id': [...], 'document_names': [...]}}
    """

    def get(self, request):
        # All known datasets (id + name) via the shared client.
        datasets_info = DatasetInfo().get_dataset_info()
        dataset_ids = [dataset['id'] for dataset in datasets_info]
        datasets_name = [dataset['name'] for dataset in datasets_info]

        dataset_id = request.query_params.get('id', '0d11a324b86111efa968a125d0045eb8')
        rag_url = f"http://10.123.66.2:9380/api/v1/datasets/{dataset_id}/documents"
        params = {
            'page_size': 9999  # effectively "all documents"
        }
        headers = {
            # SECURITY NOTE(review): key duplicated from DatasetClient and
            # committed to source; should come from settings/env.
            'Authorization': "Bearer ragflow-g5MmNiNDQ2OTgzZTExZWZhYmY0NzUzMT"
        }
        try:
            # fix: add a timeout, and catch upstream failures instead of
            # letting raise_for_status() escape as an unhandled 500 traceback.
            response = requests.get(rag_url, headers=headers, params=params, timeout=10)
            response.raise_for_status()
        except requests.exceptions.RequestException as e:
            return Response({'error': str(e)}, status=status.HTTP_502_BAD_GATEWAY)

        # Defensive lookup: a well-formed payload has data.docs, but an
        # unexpected shape now yields empty lists instead of a KeyError.
        docs = response.json().get('data', {}).get('docs', [])
        document_id = [doc['id'] for doc in docs]
        # Document names are returned without their file extension.
        document_names = [os.path.splitext(doc['name'])[0] for doc in docs]

        result = {
            'dataset_ids': dataset_ids,
            'datasets_names': datasets_name,
            'document_id': document_id,
            'document_names': document_names
        }
        return Response({'data': result})


class RagFlowView(APIView):
    """Query RAGFlow retrieval and parse defect records out of the chunks.

    POST body:
        question (required): the retrieval query.
        dataset_ids (optional): a single dataset id; defaults to a known one.
        document_ids (optional): a single document id to restrict the search.

    Returns a list of {field: value} dicts parsed from the retrieved chunk
    text, or {'message': ...} when nothing matched.
    """

    # Recognized fields in chunk text: each "Key: value" pair is captured
    # lazily up to the next recognized key (or end of chunk). Hoisted to a
    # compiled class constant so it is built once, not per request.
    _FIELD_PATTERN = re.compile(
        r"(Defect ID|Title|Category|Component|Description|Resolution|Root Cause|Status|Product_name):\s*(.*?)(?=(Defect ID|Title|Category|Component|Description|Resolution|Root Cause|Status|Product_name):|$)",
        re.IGNORECASE | re.DOTALL,
    )

    @staticmethod
    def _clean_text(text, key=None):
        """Strip HTML tags, collapse whitespace, and drop semicolons.

        When ``key`` is 'Title', bracketed tags like ``[xyz]`` are removed too.
        """
        soup = BeautifulSoup(text, 'html.parser')
        cleaned_text = soup.get_text(separator=" ")
        # Collapse all runs of whitespace (tabs, newlines, spaces) to one space.
        cleaned_text = re.sub(r'\s+', ' ', cleaned_text).strip()
        cleaned_text = re.sub(r';', '', cleaned_text)
        if key == 'Title':
            cleaned_text = re.sub(r'\[.*?\]', '', cleaned_text)
        return cleaned_text

    def post(self, request):
        # SECURITY NOTE(review): credentials belong in settings/env, not source.
        api_key = "ragflow-g5MmNiNDQ2OTgzZTExZWZhYmY0NzUzMT"
        rag_url = "http://10.123.66.2:9380/api/v1/retrieval"
        headers = {
            'Authorization': f"Bearer {api_key}",
            'Content-Type': 'application/json'
        }

        question = request.data.get('question')
        if not question:
            return Response({'error': 'Question parameter is required'}, status=status.HTTP_400_BAD_REQUEST)

        # Default to a known dataset when none is supplied. `is None` (not
        # truthiness) preserves the original handling of explicit falsy values.
        dataset_ids = request.data.get('dataset_ids')
        if dataset_ids is None:
            dataset_ids = '0d11a324b86111efa968a125d0045eb8'
        document_ids = request.data.get('document_ids', None)

        payload = {
            "question": question,
            "dataset_ids": [dataset_ids],
        }
        # Only send document_ids when the caller actually provided one.
        if document_ids:
            payload["document_ids"] = [document_ids]

        try:
            # fix: a timeout keeps a hung retrieval service from blocking the worker.
            response = requests.post(rag_url, json=payload, headers=headers, timeout=30)
            response.raise_for_status()
        except requests.exceptions.RequestException as e:
            return Response({'error': str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)

        result = response.json()
        chunks = result.get('data', {}).get('chunks', [])
        contents = [chunk['content'] for chunk in chunks if 'content' in chunk]

        results = []
        for content in contents:
            record = {}
            for match in self._FIELD_PATTERN.finditer(content):
                key = match.group(1).strip()
                # Scrub HTML and whitespace noise out of the captured value.
                record[key] = self._clean_text(match.group(2).strip(), key)
            if record:
                results.append(record)

        if not results:
            return Response({'message': '没有搜索到结果'}, status=status.HTTP_200_OK)
        return Response(results, status=status.HTTP_200_OK)
